{
"paper_id": "O04-3004",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T08:00:56.977228Z"
},
"title": "Multi-Modal Emotion Recognition from Speech and Text",
"authors": [
{
"first": "Ze-Jing",
"middle": [],
"last": "Chuang",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "National Cheng Kung University",
"location": {
"settlement": "Tainan",
"country": "Taiwan, ROC"
}
},
"email": ""
},
{
"first": "Chung-Hsien",
"middle": [],
"last": "Wu",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "National Cheng Kung University",
"location": {
"settlement": "Tainan",
"country": "Taiwan, ROC"
}
},
"email": "chwu@csie.ncku.edu.tw"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "This paper presents an approach to emotion recognition from speech signals and textual content. In the analysis of speech signals, thirty-three acoustic features are extracted from the speech input. After Principle Component Analysis (PCA) is performed, 14 principle components are selected for discriminative representation. In this representation, each principle component is the combination of the 33 original acoustic features and forms a feature subspace. Support Vector Machines (SVMs) are adopted to classify the emotional states. In text analysis, all emotional keywords and emotion modification words are manually defined. The emotion intensity levels of emotional keywords and emotion modification words are estimated based on a collected emotion corpus. The final emotional state is determined based on the emotion outputs from the acoustic and textual analyses. Experimental results show that the emotion recognition accuracy of the integrated system is better than that of either of the two individual approaches.",
"pdf_parse": {
"paper_id": "O04-3004",
"_pdf_hash": "",
"abstract": [
{
"text": "This paper presents an approach to emotion recognition from speech signals and textual content. In the analysis of speech signals, thirty-three acoustic features are extracted from the speech input. After Principle Component Analysis (PCA) is performed, 14 principle components are selected for discriminative representation. In this representation, each principle component is the combination of the 33 original acoustic features and forms a feature subspace. Support Vector Machines (SVMs) are adopted to classify the emotional states. In text analysis, all emotional keywords and emotion modification words are manually defined. The emotion intensity levels of emotional keywords and emotion modification words are estimated based on a collected emotion corpus. The final emotional state is determined based on the emotion outputs from the acoustic and textual analyses. Experimental results show that the emotion recognition accuracy of the integrated system is better than that of either of the two individual approaches.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Human-machine interface technology has been investigated for several decades. Recent research has placed more emphasis on the recognition of nonverbal information, and has especially focused on emotion reaction. Many kinds of physiological characteristics are used to extract emotions, such as voice, facial expressions, hand gestures, body movements, heartbeat and blood pressure. Scientists have found that emotion technology can be an important component in artificial intelligence [Salovey et al. 1990] , especially for human-human communication. Although human-computer interaction is different from human-human communication, some theories show that human-computer interaction shares basic characteristics with human-human interaction [Reeves et al. 1996] . In addition, affective information is pervasive in electronic documents, such as digital news reports, economic reports, e-mail, etc. The conclusions reached by researchers with respect to emotion can be extended to other types of subjective information [Subasic et al. 2001] . For example, education assistance software should be able to detect the emotions of users and; therefore; choose suitable teaching courses. Moreover, the study of emotions can apply to some assistance systems, such as virtual babysitting systems or virtual psychologist systems.",
"cite_spans": [
{
"start": 485,
"end": 506,
"text": "[Salovey et al. 1990]",
"ref_id": "BIBREF0"
},
{
"start": 741,
"end": 761,
"text": "[Reeves et al. 1996]",
"ref_id": "BIBREF1"
},
{
"start": 1018,
"end": 1039,
"text": "[Subasic et al. 2001]",
"ref_id": "BIBREF2"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1."
},
{
"text": "In recent years, several research works have focused on emotion recognition. Cohn and Katz [Cohn et al. 1998 ] developed a semi-automated method for emotion recognition from faces and voices. Silva [Silva et al. 2000] used the HMM structure to recognize emotion from both video and audio sources. Yoshitomi [Yoshitomi et al. 2000] combined the hidden Markov model (HMM) and neural networks to extract emotion from speech and facial expressions. Other researchers focused on extracting emotion from speech data only. Fukuda and Kostov [Fukuda et al. 1999 ] applied a wavelet/cepstrum-based software tool to perform emotion recognition from speech. Yu [Yu et al. 2001 ] developed a support vector machine (SVM)-based emotion recognition system. However, few approaches have focused on emotion recognition from textual input. Textual information is another important communication medium and can be retrieved from many sources, such as books, newspapers, web pages, e-mail messages, etc. It is not only the most popular communication medium, but also rich in emotion. With the help of natural language processing techniques, emotions can be extracted from textual input by analyzing punctuation, emotional keywords, syntactic structure, semantic information, etc. In [Chuang et al. 2002] , the authors developed a semantic network for performing emotion recognition from textual content. That investigation focused on the use of textual information in emotion recognition systems. For example, the identification of emotional keywords in a sentence is very helpful to decide the emotional state of the sentence.",
"cite_spans": [
{
"start": 77,
"end": 85,
"text": "Cohn and",
"ref_id": null
},
{
"start": 91,
"end": 108,
"text": "[Cohn et al. 1998",
"ref_id": "BIBREF3"
},
{
"start": 198,
"end": 217,
"text": "[Silva et al. 2000]",
"ref_id": "BIBREF4"
},
{
"start": 307,
"end": 330,
"text": "[Yoshitomi et al. 2000]",
"ref_id": "BIBREF5"
},
{
"start": 534,
"end": 553,
"text": "[Fukuda et al. 1999",
"ref_id": "BIBREF6"
},
{
"start": 650,
"end": 665,
"text": "[Yu et al. 2001",
"ref_id": "BIBREF7"
},
{
"start": 1264,
"end": 1284,
"text": "[Chuang et al. 2002]",
"ref_id": "BIBREF8"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1."
},
{
"text": "A possible application of textual emotion recognition is the on-line chat system. With many on-line chat systems, users are allowed to communicate with each other by typing or speaking.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1."
},
{
"text": "A system can recognize a user's emotion and give an appropriate response.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1."
},
{
"text": "In this paper, a multi-modal emotion recognition system is constructed to extract emotion information from both speech and text input. The emotion recognition system classifies emotions according to six basic types: happiness, sadness, anger, fear, surprise and disgust. If the emotion intensity value of the currently recognized emotion is lower than a predefined threshold, the emotion output is determined to be neutral. The proposed emotion recognition system can detect emotions from two different types of information: speech and text. To evaluate the acoustic approach, a broadcast drama, including speech signal and textual content, is adopted as the training corpus instead of artificial emotional speech. During feature selection, an initial acoustic feature set that contained 33 features is first analyzed and extracted. These acoustic features contain several possible characteristics, such as intonation, timbre, acoustics, tempo, and rhythm. We also extract some features to represent special intonations, such as trembling speech, unvoiced speech, and crying speech. Finally, among these diverse features, the most significant features are selected by means of principle component analysis (PCA) to form an acoustic feature vector. The acoustic feature vector is fed to the Support Vector Machines (SVMs) to determine the emotion output according to hyperplanes determined by the training corpus.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1."
},
{
"text": "For emotion recognition via text, we assume that the emotional reaction of an input sentence is essentially represented by its word appearance. Two primary word types, \"emotional keywords\" and \"emotion modification words,\" are manually defined and used to extract emotion from the input sentence. All the extracted emotional keywords and emotion modification words have their corresponding \"emotion descriptors\" and \"emotion modification values.\" For each input sentence, the emotion descriptors are averaged and modified using the emotion modification values to give the current emotion output. Finally, the outputs of the textual and acoustic approaches are combined with the emotion history to give the final emotion output.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1."
},
{
"text": "The rest of the paper is organized as follows. Section 2 describes the module for recognizing emotions from speech signals. The details of SVM classification model is also provided in this section. Then the textual emotion recognition module and the integration of these two modules are presented in sections 2.3 and 3, respectively. Finally, experimental results obtained using the integrated emotion recognition system are provided in section 5, and some conclusions are drawn in section 6.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1."
},
{
"text": "Deciding on appropriate acoustic features is a crucial step in emotion recognition. As in similar research, this study adopts the pitch and energy features and their derivatives. In addition, some additional characteristics may be found in emotional speech, such as trembling speech, unvoiced speech, varying speech duration, and hesitation. These features are also extracted in our approach.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acoustic Emotion Recognition Module",
"sec_num": "2."
},
{
"text": "A diagram of the acoustic feature extraction approach is shown in Figure 1 . In the proposed approach, four basic acoustic features, pitch, energy, formant 1 (F1), and the zero crossing rate (ZCR), are estimated first. Previous research has shown that emotional reactions are strongly related to the pitch and energy of the speech. For example, the pitch of speech associated with anger or happiness is always higher than that associated with sadness or disappointment, and the energy associated with surprise or anger is also greater than that associated with fear.",
"cite_spans": [],
"ref_spans": [
{
"start": 66,
"end": 74,
"text": "Figure 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Feature Extraction",
"sec_num": "2.1"
},
{
"text": "To extract an appropriate feature set, a short-time processing technique is first applied.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Figure 1. Diagram of the acoustic feature extraction module.",
"sec_num": null
},
{
"text": "The contours of the acoustic features are used to represent the time-varying feature characteristics. Each contour can be represented by its mean, slope, and slope difference. The Legendre polynomial [Abramowitz et al. 1972 ] is adopted to represent the contours of these four features.",
"cite_spans": [
{
"start": 200,
"end": 223,
"text": "[Abramowitz et al. 1972",
"ref_id": "BIBREF9"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Figure 1. Diagram of the acoustic feature extraction module.",
"sec_num": null
},
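As a rough illustration of this contour parameterization, the sketch below fits a low-order Legendre series to a hypothetical pitch contour with NumPy; the contour values, the degree-3 fit (giving four coefficients per contour), and the domain mapping are assumptions for illustration, since the text only states that "4th-order Legendre parameters" are used.

```python
import numpy as np
from numpy.polynomial import legendre

# Hypothetical pitch contour (Hz) for one utterance; in the paper the contour
# comes from short-time analysis of the speech signal.
pitch_contour = np.array([210.0, 215.0, 223.0, 231.0, 228.0, 219.0, 205.0, 198.0])

# Map frame indices onto [-1, 1], the natural domain of Legendre polynomials.
x = np.linspace(-1.0, 1.0, num=len(pitch_contour))

# Degree-3 fit, giving four coefficients per contour; whether the paper keeps
# four or five terms for its "4th-order" parameters is not spelled out.
coeffs = legendre.legfit(x, pitch_contour, deg=3)
print(coeffs)

# The fitted contour can be reconstructed to check how well the low-order
# series captures the mean, slope, and curvature of the original contour.
reconstructed = legendre.legval(x, coeffs)
```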
{
"text": "In feature extraction, we adopt several parameters that are based on pitch and energy. We extract 33 acoustic features in the following 13 categories:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Figure 1. Diagram of the acoustic feature extraction module.",
"sec_num": null
},
{
"text": "(1) 4 th -order Legendre parameters for the pitch contour;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Figure 1. Diagram of the acoustic feature extraction module.",
"sec_num": null
},
{
"text": "(2) 4 th -order Legendre parameters for the energy contour;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Figure 1. Diagram of the acoustic feature extraction module.",
"sec_num": null
},
{
"text": "(3) 4 th -order Legendre parameters for the formant one (F1) contour;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Figure 1. Diagram of the acoustic feature extraction module.",
"sec_num": null
},
{
"text": "(4) 4 th -order Legendre parameters for the zero crossing rate (ZCR) contour;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Figure 1. Diagram of the acoustic feature extraction module.",
"sec_num": null
},
{
"text": "(5) maximum energy;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Figure 1. Diagram of the acoustic feature extraction module.",
"sec_num": null
},
{
"text": "(6) maximum smoothed energy;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Figure 1. Diagram of the acoustic feature extraction module.",
"sec_num": null
},
{
"text": "(7) minimum, median, and standard deviation of the pitch contour;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Figure 1. Diagram of the acoustic feature extraction module.",
"sec_num": null
},
{
"text": "(8) minimum, median, and standard deviation of the energy contour;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Figure 1. Diagram of the acoustic feature extraction module.",
"sec_num": null
},
{
"text": "(9) minimum, median, and standard deviation of the smoothed pitch contour;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Figure 1. Diagram of the acoustic feature extraction module.",
"sec_num": null
},
{
"text": "(10) minimum, median, and standard deviation of the smoothed energy contour;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Figure 1. Diagram of the acoustic feature extraction module.",
"sec_num": null
},
{
"text": "(11) ratio of the sample number of the upslope to that of the downslope for the pitch contour;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Figure 1. Diagram of the acoustic feature extraction module.",
"sec_num": null
},
{
"text": "(12) ratio of the sample number of upslope to that of the downslope for the energy contour;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Figure 1. Diagram of the acoustic feature extraction module.",
"sec_num": null
},
{
"text": "(13) pitch vibration.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Figure 1. Diagram of the acoustic feature extraction module.",
"sec_num": null
},
{
"text": "The features in categories (1) to (8) are statistical parameters of four basic acoustic features. In order to remove discontinuities from the contour, the pitch and energy features in categories (9) and (10) are smoothed using window method.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Figure 1. Diagram of the acoustic feature extraction module.",
"sec_num": null
},
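The following sketch, under the assumption that the unspecified "window method" is a simple moving average, shows how the category (7)-(10) statistics of a contour and its smoothed version might be computed; the toy pitch values and window length are illustrative only.

```python
import numpy as np

def contour_stats(contour):
    """Minimum, median, and standard deviation of a feature contour,
    as in categories (7) and (8)."""
    return float(np.min(contour)), float(np.median(contour)), float(np.std(contour))

def smooth(contour, window=5):
    """One plausible reading of the 'window method': a moving average that
    removes short discontinuities before the statistics of categories (9)
    and (10) are computed. The paper does not specify the window."""
    kernel = np.ones(window) / window
    return np.convolve(contour, kernel, mode="same")

pitch = np.array([0.0, 0.0, 180.0, 185.0, 0.0, 190.0, 192.0, 188.0, 0.0, 0.0])
print(contour_stats(pitch))          # statistics of the raw contour
print(contour_stats(smooth(pitch)))  # statistics of the smoothed contour
```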
{
"text": "The ratios described in categories 11and 12represent not only the slope but also the shape of each vibration in the contour. Figure 2 shows the difference between these parameters. In this figure, each part shows the vibration of a contour. In order to show how the parameters are used, we assume that the length and the amplitude of these two contours are the same. In part A, the length of the upslope contour is longer than that of the downslope contour, while the opposite is shown in part B. The ratio of upslope to downslope is 3.14 (22 upslope samples to 7 downslope samples) in part A and 0.26 (6 upslope samples to 23 downslope samples) in part B.",
"cite_spans": [],
"ref_spans": [
{
"start": 125,
"end": 133,
"text": "Figure 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "Figure 2. The ratio of up-slope sample number to the down-slope sample number. Two contours with the same wavelength are shown in parts A and B; the square symbols indicate the up-slope sample, and the circle symbols indicate the down-slope sample.",
"sec_num": null
},
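A minimal sketch of the up-slope/down-slope ratio of categories (11) and (12), assuming the ratio is taken over sample-to-sample differences; the toy contour mimics part A of Figure 2.

```python
import numpy as np

def up_down_ratio(contour):
    """Ratio of the number of up-slope samples to down-slope samples,
    as in categories (11) and (12)."""
    diff = np.diff(contour)
    ups = int(np.count_nonzero(diff > 0))
    downs = int(np.count_nonzero(diff < 0))
    return ups / downs if downs else float("inf")

# A toy contour that rises slowly and falls quickly, like part A of Figure 2:
# 22 rising steps against 7 falling steps gives a ratio of about 3.14.
contour = np.concatenate([np.linspace(0.0, 1.0, 23), np.linspace(1.0, 0.0, 8)[1:]])
print(round(up_down_ratio(contour), 2))
```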
{
"text": "Trembling speech can be characterized by means of pitch vibration. For category (13), the pitch vibration is defined and calculated as follows:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Figure 2. The ratio of up-slope sample number to the down-slope sample number. Two contours with the same wavelength are shown in parts A and B; the square symbols indicate the up-slope sample, and the circle symbols indicate the down-slope sample.",
"sec_num": null
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "() ( ) ( ) ( ) 1 0 1 1 N r i PPiPPiP N \u03b4 \u2212 = \uf8ee\uf8f9 =\u2212\u00d7+\u2212 \uf8f0\uf8fb \u2211 , [] 10 00 x x x \u03b4 < \uf8f1 = \uf8f2 \u2265 \uf8f3 ,",
"eq_num": "(1)"
}
],
"section": "Figure 2. The ratio of up-slope sample number to the down-slope sample number. Two contours with the same wavelength are shown in parts A and B; the square symbols indicate the up-slope sample, and the circle symbols indicate the down-slope sample.",
"sec_num": null
},
{
"text": "where P is the mean value of the pitch contour.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Figure 2. The ratio of up-slope sample number to the down-slope sample number. Two contours with the same wavelength are shown in parts A and B; the square symbols indicate the up-slope sample, and the circle symbols indicate the down-slope sample.",
"sec_num": null
},
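A small sketch of the pitch-vibration measure, following the reconstruction of Eq. (1) above: it averages a step function of the product of consecutive mean-removed pitch samples, so rapid oscillation around the mean yields a larger value. The toy contours are illustrative.

```python
import numpy as np

def pitch_vibration(pitch):
    """Pitch vibration following the reconstruction of Eq. (1): the fraction of
    consecutive sample pairs whose mean-removed values change sign. A trembling
    (rapidly oscillating) pitch contour yields a larger value."""
    centered = np.asarray(pitch, dtype=float) - np.mean(pitch)
    products = centered[:-1] * centered[1:]
    return float(np.count_nonzero(products < 0)) / len(pitch)

steady = [200, 201, 202, 203, 204, 205, 206, 207]
trembling = [200, 210, 198, 212, 196, 214, 194, 216]
print(pitch_vibration(steady), pitch_vibration(trembling))
```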
{
"text": "Principal component analysis (PCA) is a standard statistical approach that can be used to extract the main components from a set of parameters. As described in the previous section, an initial set of 33 features is firstly extracted. After PCA is performed, 14 dimensions of principle components are chosen to capture over 90% of the total variance.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Principle Component Analysis",
"sec_num": "2.2"
},
{
"text": "Traditionally, the 14 dimensions of principle components are used to perform classification directly. But in our approach, the principle components are used to select a more detailed subspace. In PCA, each principle component is the linear combination of the original features. If a principle component is selected, the features that have larger combination weights are also selected and form a feature subspace. The combination weights of the original features are represented in the transformation matrix, which is calculated in PCA. By setting the threshold of the combination weights to a value of 0.2, we can select the significant features for each principle component to form a feature set. Therefore, we have 14 feature subspaces. ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Principle Component Analysis",
"sec_num": "2.2"
},
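A hedged sketch of this two-stage selection with scikit-learn: keep enough principal components to explain about 90% of the variance, then form one feature subspace per component from the original features whose absolute combination weight exceeds 0.2. The random feature matrix is only a stand-in for the 33 acoustic features.

```python
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

# Hypothetical training matrix: one row per utterance, 33 acoustic features.
X = np.random.randn(500, 33)
X = StandardScaler().fit_transform(X)

# Keep the smallest number of principal components explaining >= 90% of the
# variance (14 components on the paper's data; random data will differ).
pca = PCA(n_components=0.90).fit(X)
print("components kept:", pca.n_components_)

# For each retained component, the original features whose absolute combination
# weight (loading) exceeds 0.2 form one feature subspace.
subspaces = [np.flatnonzero(np.abs(component) > 0.2) for component in pca.components_]
for idx, features in enumerate(subspaces):
    print(f"subspace {idx}: features {features.tolist()}")
```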
{
"text": "The support vector machine (SVM) [Cristianini et al. 2001] has been widely applied in many research areas, such as data mining, pattern recognition, linear regression, and data clustering.",
"cite_spans": [
{
"start": 33,
"end": 58,
"text": "[Cristianini et al. 2001]",
"ref_id": "BIBREF10"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Emotion Recognition Using SVM Models",
"sec_num": "2.3"
},
{
"text": "Given a set of data belonging to two classes, the basic idea of SVM is to find a hyperplane that can completely distinguish two different classes. The hyperplane is decided by the maximal margin of two classes, and the samples that lie in the margin are called \"support vectors.\" The equation of the hyperplane is described in Eq. 2:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Emotion Recognition Using SVM Models",
"sec_num": "2.3"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "() ( ) \u2211 = +\u22c5= N i iii wxxyxD 1 0 \u03b1 .",
"eq_num": "(2)"
}
],
"section": "Emotion Recognition Using SVM Models",
"sec_num": "2.3"
},
{
"text": "Traditional SVMs can construct a hard decision boundary with no probability output. In this study, SVMs with continuous probability output are proposed. Given the test sample x', the probability that x' belongs to class c is P(class c |x'). This value is estimated based on the following factors: l the distance between the test input and the hyperplane,",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Emotion Recognition Using SVM Models",
"sec_num": "2.3"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "( ) () xD w wxD R \u2032 = \u2032 = 1 ;",
"eq_num": "(3)"
}
],
"section": "Emotion Recognition Using SVM Models",
"sec_num": "2.3"
},
{
"text": "l the distance from the class centroid to the hyperplane,",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Emotion Recognition Using SVM Models",
"sec_num": "2.3"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "() ( ) () xD xD xD R R \u2032 == \u2032 ;",
"eq_num": "(4)"
}
],
"section": "Emotion Recognition Using SVM Models",
"sec_num": "2.3"
},
{
"text": "where x is the centroid of the training data in a class; ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Emotion Recognition Using SVM Models",
"sec_num": "2.3"
},
{
"text": "Finally, the output probability is defined as follows according to the above factors:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Emotion Recognition Using SVM Models",
"sec_num": "2.3"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "( ) ( ) () () \uf8f7 \uf8f7 \uf8f8 \uf8f6 \uf8ec \uf8ec \uf8ed \uf8eb \u2032 \u2212+ = \u2032 \u2212+ = \u2032 xD xD P R P xclassP cc c 1exp1 1exp1 .",
"eq_num": "(6)"
}
],
"section": "Emotion Recognition Using SVM Models",
"sec_num": "2.3"
},
{
"text": "As described above, the acoustic feature set is divided into 14 feature sub-spaces. For each sub-space, an SVM model is applied to decide on the best class of the speech input. The final output is the combination of these different SVM outputs, and shown as follows:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Emotion Recognition Using SVM Models",
"sec_num": "2.3"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "( ) ( ) () () ( ) 1 1 1 1 1exp1 S S cic i S S c i PclassxPclassx P DxDx = = \uf8eb\uf8f6 \u2032\u2032 = \uf8ec\uf8f7 \uf8ed\uf8f8 \uf8eb\uf8f6 \uf8eb\uf8f6 \uf8ec\uf8f7 = \uf8ec\uf8f7 \uf8ec\uf8f7 \uf8ec\uf8f7 \u2032 +\u2212 \uf8ed\uf8f8 \uf8ed\uf8f8 \u220f \u220f ,",
"eq_num": "(7)"
}
],
"section": "Emotion Recognition Using SVM Models",
"sec_num": "2.3"
},
{
"text": "where the probability P i (class c |x') is the output of SVM in the i-th feature subspace and S (=14)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Emotion Recognition Using SVM Models",
"sec_num": "2.3"
},
{
"text": "is the number of sub-spaces.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Emotion Recognition Using SVM Models",
"sec_num": "2.3"
},
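A rough sketch of the per-subspace SVM combination, assuming scikit-learn and toy data: one RBF-kernel SVM is trained per feature subspace, and the per-subspace class probabilities are merged with a geometric mean in the spirit of Eq. (7). scikit-learn's Platt-scaled probabilities are used here instead of the paper's distance-based estimate.

```python
import numpy as np
from sklearn.svm import SVC

def train_subspace_svms(X, y, subspaces):
    """One RBF-kernel SVM per feature subspace (S = 14 in the paper)."""
    return [SVC(kernel="rbf", probability=True).fit(X[:, features], y)
            for features in subspaces]

def combine(models, subspaces, x_new):
    """Geometric mean of the per-subspace class probabilities, following the
    reconstruction of Eq. (7); Platt scaling stands in for the paper's
    distance-based probability estimate."""
    probs = np.array([m.predict_proba(x_new[None, features])[0]
                      for m, features in zip(models, subspaces)])
    combined = np.prod(probs, axis=0) ** (1.0 / len(models))
    return combined / combined.sum()

# Toy data: 200 utterances, 33 acoustic features, 6 emotion classes.
X = np.random.randn(200, 33)
y = np.random.randint(0, 6, size=200)
# 14 illustrative overlapping subspaces of 5 features each.
subspaces = [np.arange(i, i + 5) % 33 for i in range(0, 42, 3)][:14]
models = train_subspace_svms(X, y, subspaces)
print(combine(models, subspaces, np.random.randn(33)))
```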
{
"text": "The most popular method for performing emotion recognition from text is to detect the appearance of emotional keywords. Generally, not only the word level but also the syntactic and semantic levels may contain emotional information. Figure 3 shows a diagram of the textual emotion recognition module. A front-end speech recognizer is first used to convert the speech signal into textual data. To extract the emotional state from the text input, we assume that every input sentence includes one or more emotional keywords and emotion modification words. The emotional keywords provide a basic emotion description of the input sentence, and the emotion modification words can enhance or suppress the emotional state. Finally, the final emotional state is determined by combining the recognition results from both textual content and speech signal. ",
"cite_spans": [],
"ref_spans": [
{
"start": 233,
"end": 241,
"text": "Figure 3",
"ref_id": "FIGREF1"
}
],
"eq_spans": [],
"section": "Textual Emotion Recognition Module",
"sec_num": "3."
},
{
"text": "In order to transform the speech signal into textual data, a keyword spotting system [Wu et al. 2001 ] is applied first. The hidden Markov models (HMM) are adopted to perform keyword spotting, and the Mel-frequency cepstrum coefficients (MFCC) are extracted as the acoustic features. Obviously, the speech recognizer plays a very important role in the textual emotion recognition module. In our approach, since we consider only the emotional keywords and extract their corresponding information using HowNet, a keyword spotting system is adopted to spot the emotional keywords and emotion modification words.",
"cite_spans": [
{
"start": 85,
"end": 100,
"text": "[Wu et al. 2001",
"ref_id": "BIBREF11"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Front-end Processor",
"sec_num": "3.1"
},
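The HMM-based keyword spotter of [Wu et al. 2001] is not reproduced here; the sketch below only shows one common way to extract MFCC features for such a front end, assuming librosa and a hypothetical recorded utterance. The number of coefficients and the sampling rate are illustrative, not the paper's configuration.

```python
import librosa

# Hypothetical utterance; the paper's front end is an HMM-based keyword spotter
# trained on MFCC features, which is not reproduced in this sketch.
signal, sr = librosa.load("utterance.wav", sr=16000)

# 13 Mel-frequency cepstral coefficients per frame, a common HMM front-end setup.
mfcc = librosa.feature.mfcc(y=signal, sr=sr, n_mfcc=13)
print(mfcc.shape)  # (13, number_of_frames)
```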
{
"text": "For each emotional keyword, the corresponding emotion descriptor is manually defined. The emotion descriptor is a set of descriptions of the emotion reactions corresponding to the keywords. Basically, it contains an emotional state label and an intensity value, which ranges from 0 to 1. The emotional state label can be one of the following six labels: happiness, sadness, anger, fear, surprise, and disgust. The intensity value describes how strongly the keyword belongs to this emotional state. In many cases, however, a word may contain one or more emotional reactions. Accordingly, there may be more than one emotion descriptor for each emotional keyword. For example, two emotional states, sadness and anger, are involved in the keyword \"disappointed.\" However, the keyword \"depressed\" is annotated with only one emotional state: sadness. After the tagging process is completed, the emotion descriptors of the word \"disappointed\" are {(2, 0.2), (3, 0.6)}, and the emotion descriptor of the word \"depressed\" is {(3, 0.6)}. The numbers 2 and 3 in the parentheses indicate the emotional states anger and sadness, respectively. The numbers 0.2 and 0.6 represent the degree of the emotional states. In the following, we describe how the emotional state is calculated. Consider the following input sentence at time t: The emotion descriptors of each emotional keyword are manually defined based on a Chinese lexicon containing 65620 words. In order to eliminate errors due to subjective judgment, all the words are firstly tagged by three people individually and then cross validated by the other two people. For each word, if the results tagged by different people are close, the average of these values will be set as the emotion descriptors of the word. If the three people cannot reach a common consensus, an additional person will be asked to tag the word, and the result will be taken into consideration. Based on experience, only a few words need additional suggestions.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Emotional Keyword Definition",
"sec_num": "3.2"
},
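A minimal sketch of the descriptor lookup, using only the two examples given above; the dictionary layout and the helper function are illustrative, and the label numbering (2 = anger, 3 = sadness) follows the paper's example.

```python
# Label numbering follows the paper's example: 2 indicates anger, 3 indicates sadness.
ANGER, SADNESS = 2, 3

# Illustrative fragment of the manually tagged lexicon; each emotional keyword maps
# to one or more (emotional state label, intensity) descriptors.
emotion_descriptors = {
    "disappointed": [(ANGER, 0.2), (SADNESS, 0.6)],
    "depressed": [(SADNESS, 0.6)],
}

def keyword_emotions(spotted_keywords):
    """Collect the descriptors of every emotional keyword spotted in a sentence."""
    collected = []
    for word in spotted_keywords:
        collected.extend(emotion_descriptors.get(word, []))
    return collected

print(keyword_emotions(["disappointed", "depressed"]))
# -> [(2, 0.2), (3, 0.6), (3, 0.6)]
```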
{
"text": "The final tagged results for the emotion descriptors are shown in Table 2 . A total of 496 words are defined as emotional keywords, and there are some ambiguities. Only 423 of them have unique emotional label definitions, 64 words have 2 emotional label definitions, and 9 words have 3 emotional label definitions. Most of the ambiguities occur in the anger and sadness categories. For example, the word \"unhappy\" may indicate an angry emotion or a sad emotion, according to the individual's personality and situation. ",
"cite_spans": [],
"ref_spans": [
{
"start": 66,
"end": 73,
"text": "Table 2",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Emotional Keyword Definition",
"sec_num": "3.2"
},
{
"text": "Besides, emotional keywords, emotion modification words also play an important role in emotion recognition. For example, the following three phrases have different emotional states and emotion degrees: \"happy,\" \"very happy,\" and \"not happy.\" The only difference between these three phrases is in the emotion modification words \"very\" and \"not.\" In order to quantify the emotional effect for different emotion modification words, we define an emotion modification value. According to the previous analysis of emotions [Lang. 1990] , all emotion modification words can be classified into two groups: positive emotion modification words and negative emotion modification words. Positive emotion modification words strengthen the current emotional state, while negative emotion modification words reverse the current emotional state. For example, \"very happy\" is stronger than \"happy\" because of the use of word \"very,\" but \"not happy\" may be sad or angry.",
"cite_spans": [
{
"start": 517,
"end": 529,
"text": "[Lang. 1990]",
"ref_id": "BIBREF12"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Emotion Modification Value",
"sec_num": "3.3"
},
{
"text": "The emotion modification value is manually defined for each emotion modification word.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Emotion Modification Value",
"sec_num": "3.3"
},
{
"text": "It consists of a sign and a number. The sign indicates the positive or negative state of the emotion modification word, and the number indicates the modification strength of the emotion modification word. For example, the emotion modification values of the words or phrases \"a little,\" \"very,\" and \"extremely\" are +1, +2, and +3, respectively. And the emotion modification values of the words or phrases \"not at all,\" \"not,\" and \"never\" are -1, -2, and -3, respectively. The degree ranges from 1 to 3. For the example, in previous section, in the case of S t : \"We felt very disappointed and depressed at the results,\" the emotion modification word is represented by t j g , 1 t jN \u2264\u2264 , where N t is the number of emotion modification words in sentence S t . The corresponding emotion modification value of t j g is represented by t j u . In this example, 1 t g represents the word \"very.\" The values of N t and 1 t u are 1 and +2, respectively.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Emotion Modification Value",
"sec_num": "3.3"
},
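A small sketch pairing the modification values listed above with a spotted sentence; the substring matching is a simplification of the keyword spotting front end, and only the values given in the text are encoded.

```python
# Emotion modification values as defined in the text: a sign (positive words
# strengthen, negative words reverse the emotional state) and a strength from 1 to 3.
emotion_modification_values = {
    "a little": +1,
    "very": +2,
    "extremely": +3,
    "not at all": -1,
    "not": -2,
    "never": -3,
}

sentence = "We felt very disappointed and depressed at the results"

# Spot the modification words g_j^t and look up their values u_j^t
# (naive substring matching stands in for the keyword spotter).
spotted = [(w, v) for w, v in emotion_modification_values.items() if w in sentence]
print(spotted)  # [('very', 2)] -> N_t = 1, u_1^t = +2
```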
{
"text": "The final emotional state is the combination of the three outputs: the emotion recognition result obtained from acoustic features, the emotion descriptors of the emotional keywords, and the emotion modification values of the emotion modification words in this sentence. Given an input sentence S t at time t, the final emotion reaction obtained from the textual content of sentence S t is represented by C t E , which is a six dimension vector,",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Final Emotional State Determination",
"sec_num": "4."
},
{
"text": "E^C_t = ( e^C_t1, e^C_t2, e^C_t3, e^C_t4, e^C_t5, e^C_t6 ).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "( )",
"sec_num": null
},
{
"text": "The six elements in C t E represent the relationship between sentence S t and the six emotional states: happiness, sadness, anger, fear, surprise, and disgust, respectively. Each value is calculated as follows: Slo is a step function with a value of 1 when ty z lo = and a value of 0 when ty z lo \u2260 . The constant 1/3 is used to normalize the emotion intensity value to the range from -1 to 1.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "( )",
"sec_num": null
},
{
"text": "After the emotion reaction from the textual content has been calculated, the final emotion output E_t is the combination of E^A_t and E^C_t:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "( )",
"sec_num": null
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "E_t = \\left( e_{t1}, e_{t2}, e_{t3}, e_{t4}, e_{t5}, e_{t6} \\right)",
"eq_num": "(9)"
}
],
"section": "( )",
"sec_num": null
},
{
"text": "The emotion output of the acoustic module, e^A_ti, ranges from 0 to 1, and the emotion output of the textual module, e^C_ti, ranges from -1 to +1.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "( )",
"sec_num": null
},
{
"text": "According to the assumption that the current emotional state is influenced by the previous emotional states, the output of the current emotion vector E t must be modified by means of its previous emotion vector E t-1 . The recursive calculation of the emotion history is defined as follows:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "( )",
"sec_num": null
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "( ) 1 1 ttt EEE \u03b4\u03b4 \u2212 \u2032 =+\u2212 , 1 t \u2265 ,",
"eq_num": "(10)"
}
],
"section": "( )",
"sec_num": null
},
{
"text": "where E t is the t-th emotion vector calculated in as described in the previous section; t E\u2032 indicates that the final output considers the emotion history, and the initial value E 0 is the output without any modification. The combination coefficient \u03b4 is empirically set to 0.75.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "( )",
"sec_num": null
},
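A minimal sketch of the recursive smoothing of Eq. (10) with δ = 0.75; whether the history term is the raw or the smoothed previous vector is not spelled out, so carrying the smoothed vector forward is an assumption here.

```python
import numpy as np

DELTA = 0.75  # combination coefficient, empirically set in the paper

def smooth_emotion(previous_smoothed, current):
    """E'_t = delta * E_t + (1 - delta) * E_{t-1}, following Eq. (10).
    The previous *smoothed* vector is carried forward as the history term,
    which is one reading of the recursive formulation."""
    return DELTA * np.asarray(current) + (1.0 - DELTA) * np.asarray(previous_smoothed)

# Six-dimensional emotion vectors (happiness, sadness, anger, fear, surprise, disgust).
history = np.array([0.1, 0.6, 0.2, 0.0, 0.1, 0.0])   # E_{t-1}
current = np.array([0.0, 0.2, 0.7, 0.0, 0.1, 0.0])   # E_t
print(smooth_emotion(history, current))
```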
{
"text": "For the purpose of system evaluation, in order to obtain real emotional states from natural speech signals, we collected the training corpus from broadcast dramas. There were 1085 sentences in 227 dialogues from leading man and 1015 sentences in 213 dialogues from leading woman. The emotional states of these sentences were tagged manually. The emotion tagging results are listed in Table 3 . The system was implemented on a personal computer with a Pentium IV CPU and 512 MB of memory. A high-sensitivity microphone was connected to the computer and provided real-time information about speech signals.",
"cite_spans": [],
"ref_spans": [
{
"start": 384,
"end": 391,
"text": "Table 3",
"ref_id": "TABREF5"
}
],
"eq_spans": [],
"section": "Experimental Results",
"sec_num": "5."
},
{
"text": "As described in section II, 33 acoustic features are analyzed using PCA with thresholds of 90% and 0.2, which are the thresholds for deciding on the important principle components and the significant features of each component, respectively. The PCA process also divides the original feature space into 14 feature sub-spaces. The value of the threshold and the number of feature sub-spaces are decided experimentally.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experiment on Acoustic Feature Extraction with PCA",
"sec_num": "5.1"
},
{
"text": "For acoustic feature evaluation, an SVM classification system was constructed for this experiment. The threshold for deciding on the important principle components (R 2 ) was set to be within a range of from 85% to 100% with a step size of 2%, and the threshold for deciding on the significant features of components (T ) was set to be with a range of from \u22121 to 1 with a step size of 0.1. The experimental results are shown in Figure 4 . As shown in Figure 4 , the achieved recognition rate was 63.33% when 1 T =\u2212 . When R 2 = 91% and T = 0, the achieved recognition rate was 81.55%, the highest rate obtained in all the tests. The results show that after PCA was performed, the orthogonal feature space was extracted from the original feature sets when R 2 = 91% and T = 0, and the emotion recognition rate also increased due to the elimination of dependency.",
"cite_spans": [],
"ref_spans": [
{
"start": 428,
"end": 436,
"text": "Figure 4",
"ref_id": "FIGREF3"
},
{
"start": 451,
"end": 459,
"text": "Figure 4",
"ref_id": "FIGREF3"
}
],
"eq_spans": [],
"section": "Experiment on Acoustic Feature Extraction with PCA",
"sec_num": "5.1"
},
{
"text": "Based on the results, we could decide on the appropriate number of feature sub-spaces. Figure 5 shows the relation between the number of sub-spaces and R 2 . Since the previous experiment indicated that an appropriate value of R 2 was 91%, the appropriate number of sub-spaces was chosen as 14 based on the curve in ",
"cite_spans": [],
"ref_spans": [
{
"start": 87,
"end": 95,
"text": "Figure 5",
"ref_id": "FIGREF4"
}
],
"eq_spans": [],
"section": "Experiment on Acoustic Feature Extraction with PCA",
"sec_num": "5.1"
},
{
"text": "Since the emotion recognition rate of the textual module depends on the recognition rate of the keyword spotting system, the aim of this experiment was to identify the relationship between the keyword spotting system and textual emotion recognition module. The test corpus is first prepared with all emotional keywords are annotated manually, and then all the emotional keywords in test corpus was selected randomly and fed to the textual emotion recognition module. The emotion recognition rates of the textual module according to varying ratio of the number of emotional keywords are illustrated in Figure 6 .",
"cite_spans": [],
"ref_spans": [
{
"start": 601,
"end": 609,
"text": "Figure 6",
"ref_id": null
}
],
"eq_spans": [],
"section": "Experiment on Keyword Spotting",
"sec_num": "5.2"
},
{
"text": "As shown in Figure 6 , the emotion recognition rate of the textual module did not increase after the ratio of selected keywords reached an accuracy rate of 75%. That means if the keyword recognition rate is higher than 75%, the output of the textual emotion recognition module will reach an upper bound. Since the keyword recognition rate of the system can reach 89.6%, this keyword spotting system is suitable for the textual emotion recognition module.",
"cite_spans": [],
"ref_spans": [
{
"start": 12,
"end": 20,
"text": "Figure 6",
"ref_id": null
}
],
"eq_spans": [],
"section": "Figure 6. The relationship between the keyword recognition rate and the emotion recognition rate.",
"sec_num": null
},
{
"text": "In this experiment, 14 feature subspaces were adopted. The radial basis function was chosen as the kernel function in the SVM model. Table 4 shows the results obtained by acoustic module. Since the dramatic dialogues were spoken by professional actors, the variation of speech intonation was very large with, therefore, decreased the recognition rate. In addition, the recognition rates for neutral and sadness were a little higher than those for other emotions.",
"cite_spans": [],
"ref_spans": [
{
"start": 133,
"end": 140,
"text": "Table 4",
"ref_id": "TABREF6"
}
],
"eq_spans": [],
"section": "Emotion Recognition Results Obtained from Acoustic Information",
"sec_num": "5.3"
},
{
"text": "Checking the speech corpus, we found that the intonation patterns for neutral and sadness are more stable than those for other emotions. This was the main reason why these experimental results were obtained.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Emotion Recognition Results Obtained from Acoustic Information",
"sec_num": "5.3"
},
{
"text": "Textual emotion recognition rate (%) Ratio of selected emotion keywords (%) The acoustic module is based on the assumption that the speech information is too complicated to be classified using only one SVM. Thus, PCA is used to generate the feature subspace. In order to test this assumption, we compared the recognition results for speech input obtained using the classifier with a single SVM and multiple SVMs. Table 5 shows the comparison and confirms the assumption. ",
"cite_spans": [],
"ref_spans": [
{
"start": 413,
"end": 420,
"text": "Table 5",
"ref_id": "TABREF7"
}
],
"eq_spans": [],
"section": "Emotion Recognition Results Obtained from Acoustic Information",
"sec_num": "5.3"
},
{
"text": "The experimental results obtained by the textual emotion recognition module are listed in Table 6 . From these results, we can find that the recognition rate cannot achieve the same level in the case of the acoustic module, i.e., the keyword-based approach cannot achieve satisfactory performance. The reasons of these results are two twofold. Firstly, owing to the complexity of natural language, sentences with the same emotional state may not contain the same emotional keywords. Secondly, as mentioned above, less than 500 words are labeled as emotional keywords from a total of 65620 words. This leads to the low occurrence rate of the occurrence of emotional keywords. But when emotional keywords appear in a sentence, the emotional reaction of the sentence is always strongly related to these keywords. The keyword-based approach is still helpful for improving performance when integrated with the acoustic module. ",
"cite_spans": [],
"ref_spans": [
{
"start": 90,
"end": 97,
"text": "Table 6",
"ref_id": "TABREF8"
}
],
"eq_spans": [],
"section": "Emotion Recognition Results Obtained from Textual Content",
"sec_num": "5.4"
},
{
"text": "Finally, the experimental results obtained using the integrated system are shown in Table 7 .",
"cite_spans": [],
"ref_spans": [
{
"start": 84,
"end": 91,
"text": "Table 7",
"ref_id": "TABREF9"
}
],
"eq_spans": [],
"section": "Emotion Recognition Results Obtained Using the Integrated System",
"sec_num": "5.5"
},
{
"text": "The outside test was performed using an extra corpus collected from the same broadcast drama.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Emotion Recognition Results Obtained Using the Integrated System",
"sec_num": "5.5"
},
{
"text": "There were a total of 200 sentences in 51 dialogues in this corpus. When the integration strategy was used, the performance of the integrated system is better than that any of the individual modules. Compared with the results obtained by the acoustic module, the results obtained with the integrated system were 5.05% higher. In order to understand the results, we verified the test corpus manually and found that when a sentence was recognized as having one emotional state, it usually contained either emotional keywords or no keywords. Only a few sentences contained emotional keywords with opposite the emotional states. Thus when the output of the acoustic module was reliable, the output of the textual module could slightly support the results obtained by the acoustic module. But if the acoustic module could not identify the emotional state of an input sentence, the emotional keywords played an important role in the final calculation. ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Emotion Recognition Results Obtained Using the Integrated System",
"sec_num": "5.5"
},
{
"text": "In this paper, an emotion recognition system with multi-modal input has been proposed. When",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "6."
},
{
"text": "PCA and the SVM model are applied, the emotional state of a speech input can be classified and fed into the textual emotion recognition module. This approach to recognizing emotions from textual information is based on pre-defined emotion descriptors and emotion modification values. After all the emotion outputs have been integrated, the final emotional state is further smoothed by mean of the previous emotion history. The experimental results show that the multi-modal strategy is a more promising approach to emotion recognition than the single module strategy.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "6."
},
{
"text": "In our study, we investigated a method of textual emotion recognition and also tested the combination of the two emotion recognition approaches. Our method can extract emotions from both speech and textual information without the need for a sophisticated speech recognizer. However, there are still many problems that remain to be solved. For example, in the textual emotion recognition module, syntactic structure information is important for natural language processing but cannot be obtained using HowNet alone. An additional parser may be needed to solve this problem. In the acoustic module, crying and laughing sounds are useful for deciding on the current emotional state but are hard to extract. A sound recognizer may, thus, be useful for improving the emotion recognition performance.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "6."
}
],
"back_matter": [],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Emotional Intelligence",
"authors": [
{
"first": "P",
"middle": [],
"last": "Salovey",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Mayer",
"suffix": ""
}
],
"year": 1990,
"venue": "Imagination, Cognition and Personality",
"volume": "9",
"issue": "3",
"pages": "185--211",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Salovey, P. and J. Mayer, \"Emotional Intelligence,\" Imagination, Cognition and Personality, vol. 9, no. 3, 1990, pp.185-211.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "The Media Equation : How People Treat Computers, Television and New Media Like Real People and Places",
"authors": [
{
"first": "B",
"middle": [],
"last": "Reeves",
"suffix": ""
},
{
"first": "C",
"middle": [],
"last": "Nass",
"suffix": ""
}
],
"year": 1996,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Reeves, B. and C. Nass, \"The Media Equation : How People Treat Computers, Television and New Media Like Real People and Places,\" Cambridge Univ. Press, 1996.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Affect Analysis of Text Using Fussy Semantic Typing",
"authors": [
{
"first": "P",
"middle": [],
"last": "Subasic",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Huettner",
"suffix": ""
}
],
"year": 2001,
"venue": "IEEE Transactions on Fussy System",
"volume": "9",
"issue": "4",
"pages": "483--496",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Subasic, P. and A. Huettner, \"Affect Analysis of Text Using Fussy Semantic Typing,\" IEEE Transactions on Fussy System, vol. 9, no. 4, 2001, pp.483-496.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Bimodal Expression of Emotion by Face and Voice",
"authors": [
{
"first": "J",
"middle": [
"F"
],
"last": "Cohn",
"suffix": ""
},
{
"first": "G",
"middle": [
"S"
],
"last": "Katz",
"suffix": ""
}
],
"year": 1998,
"venue": "Proceedings of the sixth ACM international conference on Multimedia: Face/gesture recognition and their applications",
"volume": "",
"issue": "",
"pages": "41--44",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Cohn, J.F. and G.S. Katz, \"Bimodal Expression of Emotion by Face and Voice,\" Proceedings of the sixth ACM international conference on Multimedia: Face/gesture recognition and their applications, 1998, pp.41-44.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Bimodal Emotion Recognition",
"authors": [
{
"first": "L",
"middle": [],
"last": "Silva",
"suffix": ""
},
{
"first": "N",
"middle": [
"P"
],
"last": "De",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Chi",
"suffix": ""
}
],
"year": 2000,
"venue": "Proceedings of the Fourth IEEE International Conference on Automatic Face and Gesture Recognition",
"volume": "",
"issue": "",
"pages": "332--335",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Silva, L. C De and N.P. Chi, \"Bimodal Emotion Recognition,\" Proceedings of the Fourth IEEE International Conference on Automatic Face and Gesture Recognition , 2000, pp.332-335.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Effect of Sensor Fusion for recognition of Emotional States Using Voice, Face Image and Thermal Image of Face",
"authors": [
{
"first": "Y",
"middle": [],
"last": "Yoshitomi",
"suffix": ""
},
{
"first": "S",
"middle": [
"I"
],
"last": "Kim",
"suffix": ""
},
{
"first": "T",
"middle": [],
"last": "Kawano",
"suffix": ""
},
{
"first": "T",
"middle": [],
"last": "Kitazoe",
"suffix": ""
}
],
"year": 2000,
"venue": "Proceedings of the ninth IEEE International Workshop on Robot and Human Interactive Communication",
"volume": "",
"issue": "",
"pages": "173--183",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yoshitomi, Y., S.I. Kim, T. Kawano, and T. Kitazoe, \"Effect of Sensor Fusion for recognition of Emotional States Using Voice, Face Image and Thermal Image of Face,\" Proceedings of the ninth IEEE International Workshop on Robot and Human Interactive Communication, 2000, pp.173-183.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Extracting Emotion from Voice",
"authors": [
{
"first": "S",
"middle": [],
"last": "Fukuda",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "Kostov",
"suffix": ""
}
],
"year": 1999,
"venue": "Proceedings of IEEE International Workshop on Systems, Man, and Cybernetics",
"volume": "4",
"issue": "",
"pages": "299--304",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Fukuda, S. and V. Kostov, \"Extracting Emotion from Voice,\" Proceedings of IEEE International Workshop on Systems, Man, and Cybernetics, vol. 4, 1999, pp.299-304.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Emotion Detection from Speech to Enrich Multimedia Content",
"authors": [
{
"first": "F",
"middle": [],
"last": "Yu",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Chang",
"suffix": ""
},
{
"first": "Y",
"middle": [
"Q"
],
"last": "Xu",
"suffix": ""
},
{
"first": "H",
"middle": [
"Y"
],
"last": "Shum",
"suffix": ""
}
],
"year": 2001,
"venue": "Proceedings of IEEE Pacific Rim Conference on Multimedia",
"volume": "",
"issue": "",
"pages": "550--557",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yu, F., E. Chang, Y.Q. Xu, and H.Y. Shum, \"Emotion Detection from Speech to Enrich Multimedia Content,\" Proceedings of IEEE Pacific Rim Conference on Multimedia, 2001, pp.550-557.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Emotion Recognition from Textual Input using an Emotional Semantic Network",
"authors": [
{
"first": "Z",
"middle": [
"J"
],
"last": "Chuang",
"suffix": ""
},
{
"first": "C",
"middle": [
"H"
],
"last": "Wu",
"suffix": ""
}
],
"year": 2002,
"venue": "Proceedings of IEEE International Conference on Spoken Language Processing",
"volume": "",
"issue": "",
"pages": "2033--2036",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Chuang, Z.J. and C.H. Wu, \"Emotion Recognition from Textual Input using an Emotional Semantic Network,\" Proceedings of IEEE International Conference on Spoken Language Processing, 2002, pp.2033-2036.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Legendre Functions and Orthogonal Polynomials",
"authors": [
{
"first": "M",
"middle": [],
"last": "Abramowitz",
"suffix": ""
},
{
"first": "I",
"middle": [
"A"
],
"last": "Stegun",
"suffix": ""
}
],
"year": 1972,
"venue": "Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables",
"volume": "",
"issue": "",
"pages": "331--339",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Abramowitz, M. and I.A. Stegun, \"Legendre Functions and Orthogonal Polynomials,\" in Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables, New York: Dover, 1972, pp.331-339.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "An Introduction to Support Vector Machines",
"authors": [
{
"first": "N",
"middle": [],
"last": "Cristianini",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Shawe-Taylor",
"suffix": ""
}
],
"year": 2001,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Cristianini, N. and J. Shawe-Taylor, \"An Introduction to Support Vector Machines,\" Cambridge University Press, 2001.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Multi-Keyword Spotting of Telephone Speech Using Fuzzy Search Algorithm and Keyword-Driven Two-Level CBSM",
"authors": [
{
"first": "C",
"middle": [
"H"
],
"last": "Wu",
"suffix": ""
},
{
"first": "Y",
"middle": [
"J"
],
"last": "Chen",
"suffix": ""
}
],
"year": 2001,
"venue": "Speech communication",
"volume": "33",
"issue": "",
"pages": "197--212",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Wu, C.H. and Y.J. Chen, \"Multi-Keyword Spotting of Telephone Speech Using Fuzzy Search Algorithm and Keyword-Driven Two-Level CBSM,\" Speech communication, Vol.33, 2001, pp.197-212.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Emotion, atten Lang tion, and the startle reflex",
"authors": [
{
"first": "P",
"middle": [
"J"
],
"last": "Lang",
"suffix": ""
},
{
"first": "M",
"middle": [
"M"
],
"last": "Bradley",
"suffix": ""
},
{
"first": "B",
"middle": [
"N"
],
"last": "Cuthbert",
"suffix": ""
}
],
"year": 1990,
"venue": "Psychological Review",
"volume": "97",
"issue": "",
"pages": "377--395",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Lang, P.J., M.M. Bradley, and B.N. Cuthbert, \"Emotion, atten Lang tion, and the startle reflex,\" Psychological Review, 97, 1990, pp.377-395.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"type_str": "figure",
"text": "the classification confidence of the class; the classification accuracy evaluated based on the training data is used to define the classification confidence of class c, c Numberofsentencescorrectlyrecognizedasclassc P Totalnumberofsentencesinclassc = .",
"num": null,
"uris": null
},
"FIGREF1": {
"type_str": "figure",
"text": "Diagram of textual emotion recognition module.",
"num": null,
"uris": null
},
"FIGREF2": {
"type_str": "figure",
"text": "S t : \"We felt very disappointed and depressed at the results.\"Here, the i th emotional keyword is represented by ti k , 1 t iM \u2264\u2264, and M t is the number of keywords in sentence S t . In this example, words \"disappointed\" and \"depressed,\" respectively, and the value of M t is 2. For each emotional keyword t r th emotional state label, and ti r v is the r th intensity value of t i k . The value of the emotional state label can range from 1 to 6, corresponding to six emotional states: happiness, sadness, anger, fear, surprise, and disgust. In this case, the values of",
"num": null,
"uris": null
},
"FIGREF3": {
"type_str": "figure",
"text": "Emotion recognition rates for acoustic features under different PCA thresholds. The black line indicates the results obtained when R 2 = 91%, and the two gray lines indicate the results obtained when R 2 = 85% and 100%.",
"num": null,
"uris": null
},
"FIGREF4": {
"type_str": "figure",
"text": "The relationship between R2 and the number of feature sub-spaces.",
"num": null,
"uris": null
},
"TABREF0": {
"type_str": "table",
"text": "shows an example of feature subspace generation. Suppose that F 1 to F 5 are the original features, that P 1 and P 2 are the selected principle components in PCA, and that the values indicate the combination weights. By selecting the original features according to values",
"num": null,
"content": "<table/>",
"html": null
},
"TABREF1": {
"type_str": "table",
"text": "",
"num": null,
"content": "<table><tr><td>1 , F 3 , F 4 } and {F 2 ,</td></tr></table>",
"html": null
},
"TABREF2": {
"type_str": "table",
"text": "",
"num": null,
"content": "<table><tr><td colspan=\"3\">Number of tagged emotion labels of an</td><td/></tr><tr><td/><td>emotional keyword</td><td/><td>Total</td></tr><tr><td>1</td><td>2</td><td>3</td><td/></tr><tr><td>423</td><td>64</td><td>9</td><td>496</td></tr></table>",
"html": null
},
"TABREF5": {
"type_str": "table",
"text": "",
"num": null,
"content": "<table><tr><td/><td/><td>Number of tagged sentences</td></tr><tr><td/><td>Male</td><td>Female</td></tr><tr><td>Happiness</td><td>126</td><td>121</td></tr><tr><td>Sadness</td><td>121</td><td>92</td></tr><tr><td>Anger</td><td>98</td><td>80</td></tr><tr><td>Fear</td><td>60</td><td>58</td></tr><tr><td>Surprise</td><td>196</td><td>172</td></tr><tr><td>Disgust</td><td>106</td><td>113</td></tr><tr><td>Neutral</td><td>1617</td><td>1530</td></tr></table>",
"html": null
},
"TABREF6": {
"type_str": "table",
"text": "",
"num": null,
"content": "<table><tr><td/><td/><td>Recognition rate</td><td/></tr><tr><td/><td>Male</td><td>Female</td><td>Average</td></tr><tr><td>Happiness</td><td>78.85%</td><td>71.90%</td><td>75.37%</td></tr><tr><td>Sadness</td><td>85.40%</td><td>88.04%</td><td>86.72%</td></tr><tr><td>Anger</td><td>81.52%</td><td>75.00%</td><td>78.26%</td></tr><tr><td>Fear</td><td>72.13%</td><td>70.18%</td><td>71.16%</td></tr><tr><td>Surprise</td><td>73.55%</td><td>62.54%</td><td>68.05%</td></tr><tr><td>Disgust</td><td>76.32%</td><td>68.79%</td><td>72.56%</td></tr><tr><td>Neutral</td><td>88.38%</td><td>77.53%</td><td>82.96%</td></tr><tr><td>Average</td><td>79.45%</td><td>73.43%</td><td>76.44%</td></tr></table>",
"html": null
},
"TABREF7": {
"type_str": "table",
"text": "",
"num": null,
"content": "<table><tr><td/><td>Multiple SVM</td><td>Single SVM</td></tr><tr><td>Happiness</td><td>75.37%</td><td>68.13%</td></tr><tr><td>Sadness</td><td>86.72%</td><td>75.91%</td></tr><tr><td>Anger</td><td>78.26%</td><td>66.57%</td></tr><tr><td>Fear</td><td>71.16%</td><td>60.55%</td></tr><tr><td>Surprise</td><td>68.05%</td><td>55.62%</td></tr><tr><td>Disgust</td><td>72.56%</td><td>64.54%</td></tr><tr><td>Neutral</td><td>82.96%</td><td>70.01%</td></tr><tr><td>Total</td><td>76.44%</td><td>65.90%</td></tr></table>",
"html": null
},
"TABREF8": {
"type_str": "table",
"text": "",
"num": null,
"content": "<table><tr><td/><td/><td>Recognition rate</td><td/></tr><tr><td/><td>Male</td><td>Female</td><td>Average</td></tr><tr><td>Happiness</td><td>66.35%</td><td>63.64%</td><td>64.99%</td></tr><tr><td>Sadness</td><td>59.12%</td><td>61.96%</td><td>60.54%</td></tr><tr><td>Anger</td><td>76.09%</td><td>72.50%</td><td>74.29%</td></tr><tr><td>Fear</td><td>71.03%</td><td>65.51%</td><td>68.27%</td></tr><tr><td>Surprise</td><td>66.85%</td><td>58.46%</td><td>62.66%</td></tr><tr><td>Disgust</td><td>57.12%</td><td>55.34%</td><td>56.23%</td></tr><tr><td>Neutral</td><td>77.98%</td><td>64.76%</td><td>71.37%</td></tr><tr><td>Average</td><td>67.79%</td><td>63.17%</td><td>65.48%</td></tr></table>",
"html": null
},
"TABREF9": {
"type_str": "table",
"text": "",
"num": null,
"content": "<table><tr><td/><td>Inside</td><td>Outside</td></tr><tr><td>Happiness</td><td>84.44%</td><td>66.67%</td></tr><tr><td>Sadness</td><td>82.98%</td><td>73.91%</td></tr><tr><td>Anger</td><td>79.66%</td><td>67.65%</td></tr><tr><td>Fear</td><td>78.24%</td><td>62.37%</td></tr><tr><td>Surprise</td><td>80.33%</td><td>69.52%</td></tr><tr><td>Disgust</td><td>76.51%</td><td>70.43%</td></tr><tr><td>Neutral</td><td>88.24%</td><td>76.84%</td></tr><tr><td>Average</td><td>81.49%</td><td>69.63%</td></tr></table>",
"html": null
}
}
}
}
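A minimal sketch of the per-class classification confidence defined in FIGREF0 above, i.e. P_c = (sentences correctly recognized as class c) / (total sentences in class c). This is an illustrative Python snippet, not part of the parsed paper; the function name and example labels are hypothetical.

```python
# Illustrative only: compute the class-confidence values P_c described in FIGREF0
# from true and predicted class labels on training data.
from collections import Counter

def class_confidence(true_labels, predicted_labels):
    """Return {class: P_c}, where P_c = correctly recognized / total for each true class."""
    total = Counter(true_labels)
    correct = Counter(t for t, p in zip(true_labels, predicted_labels) if t == p)
    return {c: correct[c] / total[c] for c in total}

# Hypothetical example with the paper's emotion classes plus "neutral".
if __name__ == "__main__":
    y_true = ["happiness", "sadness", "anger", "happiness", "neutral", "neutral"]
    y_pred = ["happiness", "anger", "anger", "sadness", "neutral", "neutral"]
    print(class_confidence(y_true, y_pred))
```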