@inproceedings{fe3198c7896b4a4dac744486cf0cd743,
title = "Automated vocal emotion recognition using phoneme class specific features",
abstract = "Methods for automated vocal emotion recognition often use acoustic feature vectors that are computed for each frame in an utterance, and global statistics based on these acoustic feature vectors. However, at least two considerations argue for usage of phoneme class specific features for emotion recognition. First, there are well-known effects of phoneme class on some of these features. Second, it is plausible that emotion influences the speech signal in ways that differ between phoneme classes. A new method based on the concept of phoneme class specific features is proposed in which different features are selected for regions associated with different phoneme classes and then optimally combined, using machine learning algorithms. A small but significant improvement was found when this method was compared with an otherwise identical method in which features were used uniformly over different phoneme classes.",
keywords = "Biomedical application, Emotion recognition, Phoneme class specific features",
author = "G{\'e}za Kiss and {Van Santen}, Jan",
note = "Copyright: Copyright 2020 Elsevier B.V., All rights reserved.",
year = "2010",
language = "English (US)",
series = "Proceedings of the 11th Annual Conference of the International Speech Communication Association, INTERSPEECH 2010",
publisher = "International Speech Communication Association",
pages = "1161--1164",
booktitle = "Proceedings of the 11th Annual Conference of the International Speech Communication Association, INTERSPEECH 2010",
}