{
    "paper_id": "I11-1040",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T07:32:41.499653Z"
    },
    "title": "Detecting and Blocking False Sentiment Propagation",
    "authors": [
        {
            "first": "Hye-Jin",
            "middle": [],
            "last": "Min",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "KAIST Daejeon",
                "location": {
                    "country": "Republic of Korea"
                }
            },
            "email": "hjmin@nlp.kaist.ac.kr"
        },
        {
            "first": "Jong",
            "middle": [
                "C"
            ],
            "last": "Park",
            "suffix": "",
            "affiliation": {},
            "email": "park@cs.kaist.ac.kr"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "Sentiment detection of a given expression involves interaction with its component constituents through rules such as polarity propagation, reversal or neutralization. Such compositionality-based sentiment detection usually performs better than a vote-based bag-ofwords approach. However, in some contexts, the polarity of the adjectival modifier may not always be correctly determined by such rules, especially when the adjectival modifier characterizes the noun so that its denotation becomes a particular concept or an object in customer reviews. In this paper, we examine adjectival modifiers in customer review sentences whose polarity should either be propagated (SHIFT) or not (UNSHIFT). We refine polarity propagation rules in the literature by considering both syntactic and semantic clues of the modified nouns and the verbs that take such nouns as arguments. The resulting rules are shown to work particularly well in detecting cases of 'UNSHIFT' above, improving the performance of overall sentiment detection at the clause level, especially in 'neutral' sentences. We also show that even such polarity that is not propagated is still necessary for identifying implicit sentiment of the adjacent clauses.",
    "pdf_parse": {
        "paper_id": "I11-1040",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "Sentiment detection of a given expression involves interaction with its component constituents through rules such as polarity propagation, reversal or neutralization. Such compositionality-based sentiment detection usually performs better than a vote-based bag-ofwords approach. However, in some contexts, the polarity of the adjectival modifier may not always be correctly determined by such rules, especially when the adjectival modifier characterizes the noun so that its denotation becomes a particular concept or an object in customer reviews. In this paper, we examine adjectival modifiers in customer review sentences whose polarity should either be propagated (SHIFT) or not (UNSHIFT). We refine polarity propagation rules in the literature by considering both syntactic and semantic clues of the modified nouns and the verbs that take such nouns as arguments. The resulting rules are shown to work particularly well in detecting cases of 'UNSHIFT' above, improving the performance of overall sentiment detection at the clause level, especially in 'neutral' sentences. We also show that even such polarity that is not propagated is still necessary for identifying implicit sentiment of the adjacent clauses.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Detecting the sentiment of a given grammatical unit such as phrase, clause or sentence has received much attention in opinion mining and sentiment analysis. While earlier work simply detected the overall polarity by computing the majority of the polarity of words within the expression, researchers are now looking more into the composition of polarity of words within the expression (Wilson et al., 2005; Moilanen and Pulman, 2007; Choi and Cardie, 2008) . They have utilized either word features (e.g., 'Context Valence Shifters') or grammatical structures (e.g., 'the Principle of Compositionality'). It is shown that a machine learning approach with these features performs better than a vote-based bag-ofwords approach. While the importance of salient features such as 'negation' or 'intensifier' is fully recognized, it is not yet clearly understood when the polarity of a particular word is propagated or is sacrificed.",
                "cite_spans": [
                    {
                        "start": 384,
                        "end": 405,
                        "text": "(Wilson et al., 2005;",
                        "ref_id": "BIBREF17"
                    },
                    {
                        "start": 406,
                        "end": 432,
                        "text": "Moilanen and Pulman, 2007;",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 433,
                        "end": 455,
                        "text": "Choi and Cardie, 2008)",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "The polarity of an adjectival modifier is often propagated to the polarity of the modified noun or noun phrases with no inherent polarity. However, sometimes the polarity is not propagated to that of the enclosing clause or sentence at all. For example, the polarity of the word 'real' is not propagated to that of the sentence \"Real 501's are made of 14 oz canvas-like material.\" in a customer review of the pants, even though there is no salient sentiment word except for the word 'real'. Our observation shows that stopped propagation of this kind in customer reviews often appears because of the following reasons: 1) the word in question is mainly used to refer to the property or the identity of the product entity; 2) it is mainly used to describe certain processes about the author's experiences or to provide a useful guide for potential customers.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "It is important to make the correct decision about the polarity propagation in particular regarding no propagation of the polarity of such an adjectival modifier, in order to detect the sentiment over customer reviews at a deeper linguistic level. For example, the word 'real' above is chosen to refer to the other comparative entity, which is regarded as a 'positive' entity, as opposed to the present one that is not 'real'. Hence, the 'positive' polarity should not be propagated to the polarity of the current reviewed product entity in the context. The benefit of this decision is that it will enhance the detection of the 'neutral' polarity of the sentences in a document. This decision can also be utilized in identifying the underlying 'negative' sentiment of the given sentence. Although it is still hard to detect the case by just looking into the sentiment of the words at the surface level, this will still work as a good clue for the detection because such a word is in contrast to the phrase 'these Iconic Rigid jeans' as in \"These Iconic Rigid jeans are made of some sleazy, much lighter material\", which is the sentence that follows. By considering these two sentences together, we can see that a 'negative' sentiment is conveyed. Previous work on sentiment detection from customer reviews mainly focuses on detecting sentiment of product features from the patternized sentences (Hu and Liu, 2004; Popescu and Etzioni, 2005; Titov and McDonald, 2008) , so the sentences containing such implicit sentiment were not analyzed properly, despite of its importance.",
                "cite_spans": [
                    {
                        "start": 1395,
                        "end": 1413,
                        "text": "(Hu and Liu, 2004;",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 1414,
                        "end": 1440,
                        "text": "Popescu and Etzioni, 2005;",
                        "ref_id": "BIBREF13"
                    },
                    {
                        "start": 1441,
                        "end": 1466,
                        "text": "Titov and McDonald, 2008)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "In this paper, we examine adjectival modifiers in customer review sentences whose polarity should either be propagated (SHIFT) or not (UNSHIFT) when the modified noun has no inherent polarity. We refine the previous polarity propagation rules (Moilanen and Pulman, 2007) in order to enhance the performance of the propagation decision by considering both syntactic and semantic clues of the modified nouns and the verbs that take such modified nouns as arguments.",
                "cite_spans": [
                    {
                        "start": 243,
                        "end": 270,
                        "text": "(Moilanen and Pulman, 2007)",
                        "ref_id": "BIBREF10"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Our rules incorporating these clues into the previous rules have an important role in detecting the 'UNSHIFT' case. We found that our rules help the overall sentiment detection at the clause level especially regarding the 'neutral' cases but found also that even such polarity with no propagation is also necessary identifying the implicit sentiment of the adjacent clauses.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "The rest of the paper is organized as follows. Section 2 introduces previous work analyzing the sentiment in customer reviews focusing on the detection of the polarity. Section 3 summarizes compositionality-based polarity detection in this paper. Sections 4 and 5 describe basic and refined polarity decision rules for adjectival modifiers. Section 6 analyzes our experimental results and Section 7 discusses its importance and limitation. Section 8 concludes the paper with future work.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Previous work on the detection of the opinions and sentiments to a given product can be divided into three groups: graph-based method with polarity words, rule-based and machine learning-based methods focusing on sentiment detection in a compositional way. Hu and Liu (2004) identified the sentiment by counting the relevant adjectives that belong to each polarity class with a graph-based polarity lexicon. Popescu and Etzioni (2005) determined the polarity of opinioncontaining phrases by identifying the polarity of the words based on relaxation labeling.",
                "cite_spans": [
                    {
                        "start": 257,
                        "end": 274,
                        "text": "Hu and Liu (2004)",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 408,
                        "end": 434,
                        "text": "Popescu and Etzioni (2005)",
                        "ref_id": "BIBREF13"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "The rule-based sentiment identification methods are based on the Principle of Compositionality (Moilanen and Pulman, 2007; Neviarouskaya et al., 2009) . Such methods determine the polarity of a given unit basically by composing the inherent polarity of its component lexical units or other linguistic constituents. In addition, a certain type of unit called 'valence shifters' works to contextually neutralize or intensify the polarity of the given phrase or sentence (Polanyi and Zaenen, 2004; Kennedy and Inkpen, 2006) . Our work is also based on the polarity decision rules proposed by the previous work, and we modified some of them for our purpose. The benefit of rule-based approach is that it is easy to incorporate the additional rules into a rule-based framework for further detailed classification with additional categories.",
                "cite_spans": [
                    {
                        "start": 95,
                        "end": 122,
                        "text": "(Moilanen and Pulman, 2007;",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 123,
                        "end": 150,
                        "text": "Neviarouskaya et al., 2009)",
                        "ref_id": "BIBREF11"
                    },
                    {
                        "start": 468,
                        "end": 494,
                        "text": "(Polanyi and Zaenen, 2004;",
                        "ref_id": "BIBREF12"
                    },
                    {
                        "start": 495,
                        "end": 520,
                        "text": "Kennedy and Inkpen, 2006)",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "Some researchers incorporated rule-based sentiment identification into machine learning techniques (Wilson et al., 2005; Choi and Cardie, 2008) . Wilson and colleagues (2005) developed the classifier using AdaBoost.HM based on the idea of contextual valence shifters in order to identify contextual polarity at the phrase level. One of the features they considered is modification feature, modifies (parent with polarity), and modified (children with polarity), though they did not examine the context in which these types of feature may or may not contribute to the overall polarity. Choi and Cardie (2008) developed a machine-learning based sentiment detection method by adopting the Principle of Compositionality (Moilanen and Pulman, 2007) in order to examine whether such computational semantic-based idea can be made empirically effective. Their results show that the method incorporating compositionality performed best among all the methods. Our work is similar to their work in that we followed the idea of the Principle of Compositionality. However, our focus is on examining the characteristics of context surrounding a given adjectival modifier when its polarity is either propagated or not propagated and seeing how this propagation result affects the overall polarity of the clause.",
                "cite_spans": [
                    {
                        "start": 99,
                        "end": 120,
                        "text": "(Wilson et al., 2005;",
                        "ref_id": "BIBREF17"
                    },
                    {
                        "start": 121,
                        "end": 143,
                        "text": "Choi and Cardie, 2008)",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 146,
                        "end": 174,
                        "text": "Wilson and colleagues (2005)",
                        "ref_id": null
                    },
                    {
                        "start": 585,
                        "end": 607,
                        "text": "Choi and Cardie (2008)",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 716,
                        "end": 743,
                        "text": "(Moilanen and Pulman, 2007)",
                        "ref_id": "BIBREF10"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "Previous work on deciding the overall polarity of the given expression based on the Principle of Compositionality (Moilanen and Pulman, 2007; Neviarouskaya et al., 2009) takes into account how component lexical units are syntactically combined and develops rules to handle contextual polarity propagation, reversal, conflict or neutralization when combining the inherent polarities of the component lexical units.",
                "cite_spans": [
                    {
                        "start": 114,
                        "end": 141,
                        "text": "(Moilanen and Pulman, 2007;",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 142,
                        "end": 169,
                        "text": "Neviarouskaya et al., 2009)",
                        "ref_id": "BIBREF11"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentiment Detection based on Compositionality",
                "sec_num": "3"
            },
            {
                "text": "We follow the polarity decision rules from previous work (Moilanen and Pulman, 2007; Shaikh et al., 2007; Neviarouskaya et al., 2009) as shown below. We apply the rules to each sentence with its dependency tree structure acquired from the Stanford parser (Klein and Manning, 2003; Marneffe et al., 2006) .",
                "cite_spans": [
                    {
                        "start": 57,
                        "end": 84,
                        "text": "(Moilanen and Pulman, 2007;",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 85,
                        "end": 105,
                        "text": "Shaikh et al., 2007;",
                        "ref_id": "BIBREF16"
                    },
                    {
                        "start": 106,
                        "end": 133,
                        "text": "Neviarouskaya et al., 2009)",
                        "ref_id": "BIBREF11"
                    },
                    {
                        "start": 255,
                        "end": 280,
                        "text": "(Klein and Manning, 2003;",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 281,
                        "end": 303,
                        "text": "Marneffe et al., 2006)",
                        "ref_id": "BIBREF8"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentiment Detection based on Compositionality",
                "sec_num": "3"
            },
            {
                "text": "\uf0b7 Basic Propagation (Moilanen and Pulman, 2007; Neviarouskaya et al., 2009) : The polarity of the lexical unit at the upper level in the dependency structure of the text unit has a higher priority. If the word at the upper level has no inherent polarity, the polarity of its dependent word (at the lower level) is propagated to the polarity of the text unit.",
                "cite_spans": [
                    {
                        "start": 20,
                        "end": 47,
                        "text": "(Moilanen and Pulman, 2007;",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 48,
                        "end": 75,
                        "text": "Neviarouskaya et al., 2009)",
                        "ref_id": "BIBREF11"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentiment Detection based on Compositionality",
                "sec_num": "3"
            },
            {
                "text": "OBJ/Comp domination for the case of transfer verbs (Moilanen and Pulman, 2007) : The polarity of the constituent as an object or a complement of the transfer verbs that \"transmit sentiments among the arguments\" (Neviarouskaya et al., 2009) is dominant when there is a polarity conflict among arguments of such verbs. (e.g., \"My good old size started showing up too big or wouldn't shrink right.\") \uf0b7",
                "cite_spans": [
                    {
                        "start": 51,
                        "end": 78,
                        "text": "(Moilanen and Pulman, 2007)",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 211,
                        "end": 239,
                        "text": "(Neviarouskaya et al., 2009)",
                        "ref_id": "BIBREF11"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "\uf0b7",
                "sec_num": null
            },
            {
                "text": "Reversal (Moilanen and Pulman, 2007; Neviarouskaya et al., 2009) : The negation words (e.g., 'not', 'no', 'hardly', 'reduce') reverse the polarity. We added more verb entries containing the meaning of 'reversal' from other existing review corpora.",
                "cite_spans": [
                    {
                        "start": 9,
                        "end": 36,
                        "text": "(Moilanen and Pulman, 2007;",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 37,
                        "end": 64,
                        "text": "Neviarouskaya et al., 2009)",
                        "ref_id": "BIBREF11"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "\uf0b7",
                "sec_num": null
            },
            {
                "text": "Reversal for Conflict (Both negative adverbs and negative verbs are combined from (Shaikh et al., 2007) ): When two lexical units with 'negative' polarity are combined, the polarity of the unit covering both units is reversed. (e.g., \"They are 501, it's hard to go wrong with these.\": positive)",
                "cite_spans": [
                    {
                        "start": 82,
                        "end": 103,
                        "text": "(Shaikh et al., 2007)",
                        "ref_id": "BIBREF16"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "\uf0b7",
                "sec_num": null
            },
            {
                "text": "Neutralizing operators (Condition operators (e.g., if) from Neviarouskaya et al., 2009) : The polarity of the main clause or the sentence is neutralized when there are adverbial conditional clauses. We added the markers 'wh-words' or 'how' as well as the conditional marker 'if/unless'. (e.g., \"How can I go wrong with the classic 501?\")",
                "cite_spans": [
                    {
                        "start": 60,
                        "end": 87,
                        "text": "Neviarouskaya et al., 2009)",
                        "ref_id": "BIBREF11"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "\uf0b7",
                "sec_num": null
            },
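The following is a minimal, illustrative sketch (not the authors' implementation) of how the decision rules above can be composed bottom-up over a dependency tree. The Node structure, the toy prior-polarity lexicon, and the NEGATORS/CONDITIONALS word sets are assumptions made only for this example.

# Illustrative sketch of compositionality-based polarity decision over a
# dependency tree.  All lexicons and the Node structure are assumptions.
from dataclasses import dataclass, field
from typing import List

PRIOR = {"good": +1, "great": +1, "perfect": +1, "wrong": -1, "hard": -1}
NEGATORS = {"not", "no", "hardly", "never"}            # Reversal
CONDITIONALS = {"if", "unless", "how", "what", "why"}  # Neutralizing operators

@dataclass
class Node:
    word: str
    children: List["Node"] = field(default_factory=list)

def polarity(node: Node) -> int:
    """Return -1 / 0 / +1 for the subtree rooted at this node."""
    own = PRIOR.get(node.word.lower(), 0)
    child_pols = [polarity(c) for c in node.children]

    # Neutralizing operators: a conditional or wh-marker neutralizes the clause.
    if any(c.word.lower() in CONDITIONALS for c in node.children):
        return 0

    # Basic propagation: the head's own polarity has priority; otherwise the
    # first non-neutral dependent polarity is propagated upward.
    result = own if own != 0 else next((p for p in child_pols if p != 0), 0)

    # Reversal: a negation word among the dependents flips the polarity.
    if any(c.word.lower() in NEGATORS for c in node.children):
        result = -result

    # Reversal for conflict: two combined negatives yield a positive,
    # e.g. "hard to go wrong".
    if own < 0 and any(p < 0 for p in child_pols):
        result = +1

    return result

# "It's hard to go wrong with these." -> hard(-) over wrong(-) => positive
print(polarity(Node("hard", [Node("wrong", [Node("go")])])))  # 1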
            {
                "text": "By following the compositionality-based polarity decision rules, the polarity of a noun or a noun phrase that has no inherent polarity is determined by its modifier's polarity. In other words, the polarity of the modifier is propagated to the upper level node in the dependency tree structure. For example, the noun phrase 'a perfect dryer' becomes to have 'positive' polarity by the result of polarity propagation. And such polarity may or may not be propagated depending on its syntactic role of the noun phrase at the clause level. If the phrase is a subject, it gets lower priority than the one that works as an object or a complement, but if the word at the upper level or the word with higher priority at the same level (i.e., object or complement) has no inherent polarity, its polarity can be propagated up to the root level by the 'Basic Propagation'. There is no word with inherent polarity except 'real' in (1a), so the overall polarity could be de-cided as 'positive' by the rules just like (1b), but it is actually closer to 'neutral' sentence. The reason is that the adjective is utilized to refer to another product entity, which is 'the original Levis 501 Jean' in this context. Interestingly, we see that such phrases often appear in the customer reviews of a product which is a steady seller and whose quality is already well known. To detect whether the polarity of the adjectival modifier is propagated or not is crucial especially when there are no other salient polarity words except for the adjective. It is mainly used to refer to the other product entity for contrastive purposes.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Basic Rules for Adjectival Modifier",
                "sec_num": "4"
            },
            {
                "text": "In this paper, we examine the types of clues that affect the propagation of the adjectival modifier's polarity at the clause level. We also refine the previous polarity decision rules by incorporating additional clues. With the refined rules, we define our problem as follows. For a given adjectival modifier modifying a noun or a noun phrase with no inherent polarity, we label it with 'SHIFT/UNSHIFT' tags depending on the nature of propagation. If it is propagated, we label it with the 'SHIFT' tag, and if not, we do it with the 'UNSHIFT' tag.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Basic Rules for Adjectival Modifier",
                "sec_num": "4"
            },
            {
                "text": "The basic rules for labeling by considering only syntactic clues from the previous polarity decision rules are as follows.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Basic Rules for Adjectival Modifier",
                "sec_num": "4"
            },
            {
                "text": "\uf0b7 SHIFT: 1) if the syntactic role of the noun phrase is complement; 2) if the syntactic role of the noun phrase is object of verbs or prepositions \uf0b7 UNSHIFT: 1) if the syntactic role of the noun phrase is subject (e.g., (1a)); 2) if the syntactic role of the noun phrase is object of the verb whose syntactic type is either 'gerund (Ving)' or 'infinitive (to V)'",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Basic Rules for Adjectival Modifier",
                "sec_num": "4"
            },
            {
                "text": "We also regarded the case as 'UNSHIFT' where the noun phrase has lower priority than its sibling phrases in the dependency tree; for example, if there is an object with non-neutral polarity and the syntactic role of the given noun phrase is subject, the labeling is done with 'UNSHIFT'. Example (2) shows 'SHIFT' and 'UNSHIFT' for the adjectival modifier 'good' and 'great', respectively.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Basic Rules for Adjectival Modifier",
                "sec_num": "4"
            },
            {
                "text": "(2) a. It's a good buy. b. A great shave takes a little more commitment than just breaking out a can of foam and a disposable razor.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Basic Rules for Adjectival Modifier",
                "sec_num": "4"
            },
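A small sketch of the basic SHIFT/UNSHIFT rules and the sibling-priority exception described above, using Stanford-style dependency relation names; the relation labels and verb-form values passed in are illustrative assumptions, not the paper's exact feature encoding.

# Sketch of the basic SHIFT/UNSHIFT labeling rules; relation and verb-form
# names ('nsubj', 'dobj', 'gerund', ...) are illustrative assumptions.

def basic_label(np_relation: str, governor_verb_form: str = "finite",
                sibling_has_polarity: bool = False) -> str:
    """np_relation: grammatical role of the noun phrase whose adjectival
    modifier carries the polarity.  governor_verb_form: form of the verb
    taking the noun phrase as its argument.  sibling_has_polarity: True if
    a higher-priority sibling (object or complement) already has a
    non-neutral polarity."""
    if np_relation == "nsubj":                      # subjects do not propagate
        return "UNSHIFT"
    if governor_verb_form in ("gerund", "infinitive"):
        return "UNSHIFT"                            # object of V-ing / to-V
    if sibling_has_polarity:                        # outranked by a sibling
        return "UNSHIFT"
    return "SHIFT"                                  # complements and objects

print(basic_label("attr"))    # SHIFT   -- (2a) "It's a good buy."
print(basic_label("nsubj"))   # UNSHIFT -- (2b) "A great shave takes ..."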
            {
                "text": "We decided not to use machine learning techniques with the following reasons. First, our goal is not to enhance the overall performance of sentiment detection in general but to examine what kinds of additional clues are called especially for the decision of the polarity propagation of the adjectives modifying the noun with no polarity. Following Kennedy and Inkpen (2006) 's work for measuring the impact of valence shifters on sentiment classification, we believe it is not straightforward to identify major factors for the improvement with a machine learning algorithm. Second, our work focuses on relatively small cases among all the cases in the whole review sentences (See Table 5 ), so it is reasonable to directly apply refined rules to each case without an unnecessary training process handling other cases. We believe that the rules of this kind by taking a closer look at the focused cases could be extended regarding scalability with the help of the machine leaning techniques in the future.",
                "cite_spans": [
                    {
                        "start": 348,
                        "end": 373,
                        "text": "Kennedy and Inkpen (2006)",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 680,
                        "end": 687,
                        "text": "Table 5",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Basic Rules for Adjectival Modifier",
                "sec_num": "4"
            },
            {
                "text": "We refined the rules with additional clues as follows because the basic rules do not work properly in some context.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Rule Refinement",
                "sec_num": "5"
            },
            {
                "text": "The basic rules mainly consider the syntactic types of the noun phrase and the verb taking the noun phrase as the argument. However, the following clues at the phrase level may also affect the propagation.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Phrase-level Clues",
                "sec_num": "5.1"
            },
            {
                "text": "Quoted / Capital Letters (UNSHIFT) \uf0b7 Types of noun in the product reviews Example (3) shows quoted adjectival modifiers. The quotes indicate that its inherent polarity is not effective. We can see that the author of the review intentionally used them to indicate such neutralization.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "\uf0b7",
                "sec_num": null
            },
            {
                "text": "(3) a. The fit on these \"relaxed\" jeans is just that--relaxed but not loose.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "\uf0b7",
                "sec_num": null
            },
            {
                "text": "b. If you love \"Happy Hippy\" shower gel, this fun bath product will impress you.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "\uf0b7",
                "sec_num": null
            },
            {
                "text": "Examples (4) and (5) show the polarity propagation depending on the types of modified noun by the adjectives. While the polarities in Example (4) are propagated, those in Example (5) are not propagated.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "\uf0b7",
                "sec_num": null
            },
            {
                "text": "(4) a. This product also arrived in good condition and in good time. (Delivery) b. I will never order anything from Levi again until they come back to the original levi material. (Product feature) (5) a. They were also not much cheaper than I would have paid from a real retailer. ( The types of noun in Example (4) are related to explicit sentiment of the product. On the other hand, the types of noun in Example (5) are related to implicit sentiment or additional background information provided for the potential customers. In order to distinguish SHIFT cases from UN-SHIFT cases resulting from such different types of noun we built a lexicon for the noun type as shown in Table 1 . We collected the words belonging to each type by utilizing three different methods. We first manually collected words belonging to each noun type from the sample review texts and extended the entries by including synonyms of the seed words in WordNet (Synonyms; Miller, 1995) . Some synsets of WordNet such as 'body_part'/'illness' and 'shop' are appropriate for 'User information' and 'Location'. We combined several synsets for such type (WordNet Synsets ",
                "cite_spans": [
                    {
                        "start": 937,
                        "end": 947,
                        "text": "(Synonyms;",
                        "ref_id": null
                    },
                    {
                        "start": 948,
                        "end": 961,
                        "text": "Miller, 1995)",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 281,
                        "end": 282,
                        "text": "(",
                        "ref_id": null
                    },
                    {
                        "start": 676,
                        "end": 683,
                        "text": "Table 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "\uf0b7",
                "sec_num": null
            },
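A sketch of the lexicon-expansion step described above, i.e. extending manually collected seed nouns with their WordNet synonyms; it assumes NLTK with the WordNet corpus installed, and the seed words and type names shown are examples only, not the contents of Table 1.

# Expand seed nouns for each noun type with WordNet synonyms (noun senses).
# Requires NLTK and its WordNet corpus: nltk.download('wordnet').
from nltk.corpus import wordnet as wn

SEEDS = {                      # illustrative seeds, not the paper's Table 1
    "Delivery": ["delivery", "shipping", "package"],
    "Location": ["store", "shop", "retailer"],
    "User information": ["skin", "hair", "allergy"],
}

def expand(seed_words):
    """Return the seeds plus all single-word WordNet noun synonyms."""
    expanded = set(seed_words)
    for word in seed_words:
        for synset in wn.synsets(word, pos=wn.NOUN):
            for lemma in synset.lemma_names():
                if "_" not in lemma:        # keep single-word entries only
                    expanded.add(lemma.lower())
    return sorted(expanded)

noun_type_lexicon = {t: expand(words) for t, words in SEEDS.items()}
print(noun_type_lexicon["Location"][:10])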
            {
                "text": "The main reason for the second rule of the 'UN-SHIFT' basic rules in Section 4 is that we assumed the given phrase/clause could be regarded as a secondary concept or topic for the main concept or topic as shown in Example (6).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Clause-level Clues",
                "sec_num": "5.2"
            },
            {
                "text": "(6) a. Anyone who is that determined to make the best product on the market, obviously will do whatever it takes to make it happen. b. Getting an outstanding shave from this razor should be a cinch.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Clause-level Clues",
                "sec_num": "5.2"
            },
            {
                "text": "However, the given phrase/clause should be regarded as the main concept or an independent concept as shown in Example (7).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Clause-level Clues",
                "sec_num": "5.2"
            },
            {
                "text": "(7) a. It seemed to have a rich sophistication which goes with horseback riding or polo. b. It's wonderful doing everything I need, including making my hair nice and shiny, without the heaviness.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Clause-level Clues",
                "sec_num": "5.2"
            },
            {
                "text": "In order to capture these differences, we refined the rules as shown in Table 2 .",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 72,
                        "end": 79,
                        "text": "Table 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Clause-level Clues",
                "sec_num": "5.2"
            },
            {
                "text": "Gerund (Ving) IF the head of the infinitive has auxiliary characteristics such as 'seem' and 'need' THEN label it with SHIFT. Otherwise, label it with 'UNSHIFT'. IF the phrase/clause including the gerund is clausal subject THEN label it with 'UNSHIFT'. Otherwise label it with 'SHIFT'.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Infinitive (to V)",
                "sec_num": null
            },
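A sketch of the two refined rules above as a small decision function; AUX_LIKE_HEADS stands in for verbs with 'auxiliary characteristics' such as 'seem' and 'need', and the extra inflected forms in it are assumptions.

# Refined SHIFT/UNSHIFT decision for infinitival and gerundive governors.
AUX_LIKE_HEADS = {"seem", "seemed", "need", "needed"}  # 'seem'/'need' per the
                                                       # text; forms added here

def refined_verbform_label(verb_form: str, infinitive_head: str = "",
                           gerund_is_clausal_subject: bool = False) -> str:
    if verb_form == "infinitive":
        # (7a) "It seemed to have a rich sophistication ..." -> SHIFT
        return "SHIFT" if infinitive_head.lower() in AUX_LIKE_HEADS else "UNSHIFT"
    if verb_form == "gerund":
        # (6b) "Getting an outstanding shave ... should be a cinch." -> UNSHIFT
        return "UNSHIFT" if gerund_is_clausal_subject else "SHIFT"
    return "SHIFT"

print(refined_verbform_label("infinitive", infinitive_head="seemed"))    # SHIFT
print(refined_verbform_label("gerund", gerund_is_clausal_subject=True))  # UNSHIFT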
            {
                "text": "The rule for the object of prepositions should also be refined. As we mentioned in Section 5.1, the reason for mentioning some particular types of object in the review is to explain additional background information as a guide for the potential customer as well as showing the sentiment about the product. The types of noun at the phrase level cannot always solely determine the polarity propagation because such decision is still affected by the presence of other constituents in the context at the clause level. For example, by comparing (5c) with the sentence \"it provides a very close, smooth shave\", the polarity of 'smooth' is propagated while that of 'great' is not propagated.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Table 2. Refined rules for 'UNSHIFT'",
                "sec_num": null
            },
            {
                "text": "To handle this case properly, we consider 'Clause-level Semantic Label' at a shallow level by taking into account both some preposition types and the noun types together that frequently appear as shown in Table 3 . We named the labels by referring to 'Frame Index' from FrameNet data (Baker et al., 1998) . This list of the pairs filters further 'UNSHIFT' cases from the 'SHIFT' labeled cases by the basic rules. The last clue is about the sentence type. Even if the polarity of the adjectival modifier is propagated to the top node word at the clause level, the type of the sentence may block it for the overall polarity of the whole sentence. We consider three types of sentences that turn the 'SHIFT' label into the 'UNSHIFT' label as shown in Table 4 . Make sure you shake the bottle before using for best color results (as mentioned on the packaging). Table 4 . Types of sentences for 'UNSHIFT'",
                "cite_spans": [
                    {
                        "start": 284,
                        "end": 304,
                        "text": "(Baker et al., 1998)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [
                    {
                        "start": 205,
                        "end": 212,
                        "text": "Table 3",
                        "ref_id": null
                    },
                    {
                        "start": 747,
                        "end": 754,
                        "text": "Table 4",
                        "ref_id": null
                    },
                    {
                        "start": 857,
                        "end": 864,
                        "text": "Table 4",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Table 2. Refined rules for 'UNSHIFT'",
                "sec_num": null
            },
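A sketch of the two clause-level filters just described: the (preposition, noun type) pairs of Table 3 and the sentence types of Table 4, both of which turn a 'SHIFT' decision into 'UNSHIFT'. The particular pairs and semantic-label names below are illustrative placeholders, since the full contents of Tables 3 and 4 are not reproduced here.

# Clause-level filters that demote SHIFT to UNSHIFT; the pair table and
# semantic-label names are placeholders, not the paper's Table 3.
UNSHIFT_PREP_NOUN_PAIRS = {
    ("from", "Location"): "Source",
    ("with", "User information"): "Accompaniment",
    ("in", "Delivery"): "Circumstance",
}

def filter_by_prep_noun(label: str, preposition: str, noun_type: str) -> str:
    """Block propagation when the modified noun is the object of a
    preposition that marks background information."""
    if label == "SHIFT" and (preposition, noun_type) in UNSHIFT_PREP_NOUN_PAIRS:
        return "UNSHIFT"
    return label

def filter_by_sentence_type(label: str, sentence_type: str) -> str:
    """Conditional, experience, and guide sentences block propagation."""
    if sentence_type in ("condition", "experience", "guide"):
        return "UNSHIFT"
    return label

print(filter_by_prep_noun("SHIFT", "from", "Location"))   # UNSHIFT
print(filter_by_sentence_type("SHIFT", "guide"))          # UNSHIFT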
            {
                "text": "As a number of previous researches also considered, we canceled the detected sentiment at the conditional clause. In addition, we considered two domain specific types of sentences, namely, experiences sentences and guide sentences as the clues for 'UNSHIFT' cases, because these types of sentences also give background information rather than explicitly mentioning the sentiment so that the polarity of the adjective tends not to be propagated. We defined experience sentence whose main subject is the author and which has present or past perfect tenses with purchase related verbs (e.g., buy, search, try or return). We also defined guide sentence that is an imperative sentence with no main subject or with the subject referring to the potential customer such as 'you' or 'people'.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Condition",
                "sec_num": null
            },
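A sketch of the 'experience sentence' and 'guide sentence' heuristics defined above; the PURCHASE_VERBS list and the feature values passed in (subject string, tense tag, imperative flag) are illustrative assumptions about how a clause would be represented.

# Heuristics for experience and guide sentences; feature encoding is assumed.
PURCHASE_VERBS = {"buy", "bought", "order", "ordered", "search", "searched",
                  "try", "tried", "return", "returned"}

def is_experience_sentence(subject: str, main_verb: str, tense: str) -> bool:
    """Author as subject, present/past perfect tense, purchase-related verb."""
    return (subject.lower() in ("i", "we")
            and tense in ("present_perfect", "past_perfect")
            and main_verb.lower() in PURCHASE_VERBS)

def is_guide_sentence(subject: str, is_imperative: bool) -> bool:
    """Imperative sentence with no subject, or a subject addressing the
    potential customer ('you', 'people')."""
    return is_imperative and subject.lower() in ("", "you", "people")

print(is_experience_sentence("I", "ordered", "present_perfect"))  # True
print(is_guide_sentence("", True))                                # True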
            {
                "text": "The preprocessing steps before applying the rules above are as follows. First, we get the dependency relation pairs for each input sentence acquired from the Stanford parser (Klein and Manning, 2003; Marneffe et al., 2006) , and constructed the dependency tree structure for tree traversal in order to process polarity propagation. Then we assigned each word to its inherent polarity (if it has one) by looking up the sentiment lexicon, 'Subjectivity Lexicon' (Riloff and Wiebe, 2003) . We adapted the lexicon to product reviews by modifying the inherent polarity of 36 lexical entries (e.g., white, positive to neutral) and adding 105 additional words frequently used (e.g., small with neutral). In order to apply rules to particular types of adjective and verb such as transfer verbs or contextually polarized adjectives, we added an additional field such as 'type' into each lexical entry to show their identities (The original types of 22 entries in 'Subjective Lexicon' are modified). As for extracting clues, we utilized dependency relations for syntactic types of nouns and verbs. For semantic types of nouns and verbs, we utilized the semiautomatically constructed lexicon as mentioned in Section 5.1. In addition, in order to identify 'experience sentences' and 'guide sentences', we extracted tense information and noun subject by utilizing dependency parse tree.",
                "cite_spans": [
                    {
                        "start": 174,
                        "end": 199,
                        "text": "(Klein and Manning, 2003;",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 200,
                        "end": 222,
                        "text": "Marneffe et al., 2006)",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 460,
                        "end": 484,
                        "text": "(Riloff and Wiebe, 2003)",
                        "ref_id": "BIBREF15"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Condition",
                "sec_num": null
            },
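A sketch of the preprocessing flow described above. parse_dependencies() is only a placeholder for the Stanford parser (any parser returning (head, relation, dependent) triples would do), and SUBJECTIVITY_LEXICON is a tiny stand-in for the Subjectivity Lexicon with the kind of domain adaptation mentioned in the text.

# Preprocessing sketch: dependency triples -> tree -> prior polarity lookup.
from collections import defaultdict

SUBJECTIVITY_LEXICON = {           # word -> (polarity, type); toy stand-in
    "good": ("positive", "adj"),
    "white": ("neutral", "adj"),   # adapted: positive -> neutral for reviews
    "small": ("neutral", "adj"),   # added domain-specific entry
}

def parse_dependencies(sentence):
    """Placeholder for a dependency parser returning (head, rel, dependent)."""
    raise NotImplementedError("plug a dependency parser in here")

def build_tree(triples):
    """Turn (head, rel, dependent) triples into a head -> children map."""
    children = defaultdict(list)
    for head, rel, dep in triples:
        children[head].append((rel, dep))
    return children

def inherent_polarity(word):
    return SUBJECTIVITY_LEXICON.get(word.lower(), ("neutral", None))[0]

# Hand-written triples for "It is a good buy" instead of a real parse:
triples = [("buy", "det", "a"), ("buy", "amod", "good"), ("is", "attr", "buy")]
print(build_tree(triples)["buy"])      # [('det', 'a'), ('amod', 'good')]
print(inherent_polarity("good"))       # positive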
            {
                "text": "We performed two types of experiment in order to examine the performance of our refined polarity propagation rules and the contribution of the propagation results to the sentiment detection at the clause level. Table 5 shows the data sets of customer reviews we used for the experiments. We first tested our rules with Set 1 (Beauty positive), a corpus utilized in (Blitzer et al., 2007) because all the reviews are classified as 'positive', so we assume that there are many adjectival modifiers with 'positive' polarity. We then performed both propagation decision and sentiment classification experiments with Set 2 (Levi's Jean), which is crawled review data from Amazon.com by ourselves. The reasons why we chose this product are as follows. First, it is a steady-selling product so that most of the reviews are regarded as positive, which makes it more important to identify negative or neutral opinions than other kinds of reviews. Hence, it is crucial to consider correct decision of propagation of the adjectival modifiers with 'positive' polarity that is mostly not propagated. Second, after the initial observation, we found that a particular expression about 'changes in quality' frequently appears in such reviews (about 20%) and the adjectival modifiers with 'positive' polarity in such expression are mostly not propagated because it would refer to other particular entities or be used to describe a certain process.",
                "cite_spans": [
                    {
                        "start": 365,
                        "end": 387,
                        "text": "(Blitzer et al., 2007)",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 211,
                        "end": 218,
                        "text": "Table 5",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Experimental Results",
                "sec_num": "6"
            },
            {
                "text": "Beauty Positive set 1 444 6,126 7.2% Levi's Jeans set 2 147 1,655 8.8% Table 5 . Data sets Table 6 shows the numbers of propagation rules and Table 7 shows the propagation decision results. Compared to the results by the basic rules, the performance is enhanced in general. However, we notice that the rules related to VerbType are effective on recall but not on precision for 'SHIFT'. On the other hand, as for 'UNSHIFT' the rules are effective on precision but not on recall. Rules taking into account both noun types and prepositions slightly enhance the overall performance. The overall rules that include sentence type score the best precision and recall figures, which are both effective for 'SHIFT' and 'UNSHIFT'.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 71,
                        "end": 78,
                        "text": "Table 5",
                        "ref_id": null
                    },
                    {
                        "start": 91,
                        "end": 98,
                        "text": "Table 6",
                        "ref_id": null
                    },
                    {
                        "start": 142,
                        "end": 149,
                        "text": "Table 7",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Data Sets # for exp. Total %",
                "sec_num": null
            },
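            {
                "text": "As a minimal sketch of how the per-label precision and recall figures reported above can be computed, the following illustration pairs gold and predicted propagation decisions; the function name and toy data are hypothetical and do not reproduce the evaluation code used in this work.

def precision_recall(gold, predicted, label):
    # Precision/recall of one decision label ('SHIFT' or 'UNSHIFT').
    tp = sum(1 for g, p in zip(gold, predicted) if g == label and p == label)
    fp = sum(1 for g, p in zip(gold, predicted) if g != label and p == label)
    fn = sum(1 for g, p in zip(gold, predicted) if g == label and p != label)
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    return precision, recall

# Toy example: five clauses with gold and predicted propagation decisions.
gold      = ['SHIFT', 'UNSHIFT', 'SHIFT', 'UNSHIFT', 'SHIFT']
predicted = ['SHIFT', 'SHIFT',   'SHIFT', 'UNSHIFT', 'UNSHIFT']
for label in ('SHIFT', 'UNSHIFT'):
    p, r = precision_recall(gold, predicted, label)
    print(label, round(p, 2), round(r, 2))",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experimental Results",
                "sec_num": "6"
            },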
            {
                "text": "Next, we apply these rules to our data set 2. Table 8 shows the propagation decision results. The accuracy for the overall test clauses is almost similar to that for set 1. While precision for 'UNSHIFT' and recall for 'SHIFT' rose, precision for 'SHIFT' and recall for 'UNSHIFT' dropped. We analyzed False Negative errors of 'UNSHIFT' cases. Most of them are unknown cases for each rule except due to parsing errors. This also led to the drop of the precision for 'SHIFT'. The strong restriction for 'UNSHIFT' also affected the result of recall for 'SHIFT'. Table 9 shows the sentiment detection results at the clause level for set 2. The performance of 'positive' label is not much enhanced but that of 'neutral' label is enhanced. We believe that this is because if the polarity of the top node word is explicitly 'positive' because of its inherent polarity the overall polarity of the clause is obviously 'positive' regardless of the result of the polarity propagation decision. On the other hand, in the case of 'neutral' clause, the correct polarity propagation decision for 'UNSHIFT' is critical for detecting the overall polarity. This confirms that our rules have a critical role in detecting the sentiment of 'neutral' sentences. By the importance of 'neutral' polarity, we conducted an error analysis on 18 False Positive cases for 'neutral' polarity as shown in Table 10 .",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 46,
                        "end": 53,
                        "text": "Table 8",
                        "ref_id": null
                    },
                    {
                        "start": 558,
                        "end": 565,
                        "text": "Table 9",
                        "ref_id": null
                    },
                    {
                        "start": 1373,
                        "end": 1381,
                        "text": "Table 10",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Data Sets # for exp. Total %",
                "sec_num": null
            },
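            {
                "text": "To make the decision logic described above concrete, the following is a minimal sketch, assuming a small hypothetical prior-polarity lexicon and a propagation decision computed elsewhere; it is an illustration only, not the implementation used in this work.

PRIOR_POLARITY = {'real': 'positive', 'great': 'positive', 'terrible': 'negative'}  # hypothetical lexicon

def clause_polarity(top_node_word, adj_modifier, propagate):
    # top_node_word: word at the top of the clause's dependency tree
    # adj_modifier:  adjectival modifier attached below the top node
    # propagate:     True for a 'SHIFT' decision, False for 'UNSHIFT'
    top = PRIOR_POLARITY.get(top_node_word, 'neutral')
    if top != 'neutral':
        # An inherently polar top node determines the clause polarity
        # regardless of the propagation decision.
        return top
    if propagate:
        # SHIFT: the modifier's prior polarity climbs to the clause level.
        return PRIOR_POLARITY.get(adj_modifier, 'neutral')
    # UNSHIFT: the modifier's polarity is blocked, so the clause stays neutral.
    return 'neutral'

# Example (1a): Real 501's are made of 14 oz canvas-like material.
# 'real' is blocked (UNSHIFT), leaving the clause neutral.
print(clause_polarity('made', 'real', propagate=False))  # -> neutral",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experimental Results",
                "sec_num": "6"
            },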
            {
                "text": "Table 10. Error distribution (Types / Description / #): Implicit sentiment: the overall sentiment should be detected by further deep linguistic analysis (8); Incorrect 'UNSHIFT': the 'UNSHIFT' decision is incorrect (3); Incorrect polarity: the polarity result is incorrect due to other lexical entries (3); Parsing errors: the propagation is made incorrectly due to an incorrect dependency relation (2); Others: comparison without 'Positive'/'Negative' sentiment (2).",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 0,
                        "end": 8,
                        "text": "Table 10",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "7"
            },
            {
                "text": "We note that the reason for considering specific sentence types as addressed in this paper is that we assume that these sentences are better suited to demonstrate the need for blocking the propagation of the polarity of the given adjectival modifier.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Others",
                "sec_num": "2"
            },
            {
                "text": "Although we considered certain types in a limited way, we haven't fully observed what types of sentence are actually involved in propagation. In addition, we found that some sentences in the data set we considered initially as having the sentence type that blocks the propagation of polarity of the adjectival modifier do not convey 'neutral' but convey 'positive' or 'negative' polarity implicitly as shown in Example (8).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Others",
                "sec_num": "2"
            },
            {
                "text": "(8) a. If there is a more perfect shampoo, I haven't found it. b. Previously, I had to visit my favorite store more than once to get my size. c. I've had it for a year and the elastic is totally stretched out with normal wear.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Others",
                "sec_num": "2"
            },
            {
                "text": "The main clause in (8a) conveys 'positive' polarity implicitly even though there is no polaritybearing word. Further processing is necessary including a proper account of negation. The phrases in (8b) and (8c) are about product entities contrastive to the currently reviewed product so that the inherently assigned polarity of 'favorite' and 'normal' is not applicable to the currently reviewed product. In order to detect the implicit intention of this kind, we should also detect the clues for contrast such as 'previously' or the relation between the phrases 'elastic' and 'be stretched out'. Although the propagation decision for 'UN-SHIFT' itself is correct, such inherent polarity of the adjectival modifier may help to identify the implicit sentiment of the adjacent clause as shown in Example (9). (9) a. I washed them repeatedly in my very efficient and eco-friendly Asko washer, but the smell remained. b. I have paid much more for inferior brand jeans and I can say that I won't be doing that anymore.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Others",
                "sec_num": "2"
            },
            {
                "text": "The implicit polarity of the underlined clause in (9a) may be both 'negative' and 'positive' depending on the context. By utilizing both the inherent polarity of 'efficient' and the role of the conjunction 'but', the conventional polarity detection rule along with conjuncts (Meena and Prabhakar, 2007) can correctly detect its polarity as 'negative'. As for (9b), by the inherent polarity 'negative' of 'inferior' and negation on the underlined clause we can detect the 'positive' polarity of the underlined clause. However, the possibility of the correctness of the detection is still chancy, and a further analysis of the underlying meaning of the clause or the sentence is called for. For example, if we label the clause containing 'inferior' in (9b) as 'action for goal achievement', we can detect the polarity of the underlined clause as 'negative' by the rule taking such label and another label related to its continuity.",
                "cite_spans": [
                    {
                        "start": 275,
                        "end": 302,
                        "text": "(Meena and Prabhakar, 2007)",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Others",
                "sec_num": "2"
            },
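            {
                "text": "As a minimal sketch of the conjunction- and negation-based heuristics discussed above (in the spirit of Meena and Prabhakar, 2007), the following illustration uses a small hypothetical lexicon and function names of our own choosing; it is not the authors' implementation.

PRIOR_POLARITY = {'efficient': 'positive', 'favorite': 'positive', 'inferior': 'negative'}
NEGATORS = {'not', 'never', 'no', "won't"}

def flip(polarity):
    return {'positive': 'negative', 'negative': 'positive'}.get(polarity, 'neutral')

def clause_prior(tokens):
    # Prior polarity of a clause, flipped once if a negator occurs in it.
    polarity = 'neutral'
    for tok in tokens:
        polarity = PRIOR_POLARITY.get(tok.lower(), polarity)
    if any(tok.lower() in NEGATORS for tok in tokens):
        polarity = flip(polarity)
    return polarity

def but_rule(left_clause, right_clause):
    # If the conjunct after 'but' carries no overt polarity word,
    # assume it contrasts with the polarity of the left conjunct.
    right = clause_prior(right_clause)
    if right != 'neutral':
        return right
    return flip(clause_prior(left_clause))

# (9a): the clause after 'but' has no polarity word, so it is read
# as contrasting with the 'positive' left conjunct.
left = 'I washed them repeatedly in my very efficient and eco-friendly Asko washer'.split()
right = 'the smell remained'.split()
print(but_rule(left, right))  # -> negative",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "7"
            },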
            {
                "text": "In this paper, we refined the previous polarity propagation rules in order to better decide whether the polarity of the adjectival modifier should be propagated or not. We considered both phrase-level and clause-level clues by considering syntactic and semantic types of nouns and verbs. Our rules incorporating these clues into the basic rules detected the 'UNSHIFT' case particularly well. The detection results of the overall sentiment at the clause level are meaningfully enhanced as compared to those based on the previous polarity propagation rules regarding especially 'neutral' sentences. However, despite the correct decision for 'UNSHIFT', we found that such polarity of the modifiers may also help to identify the implicit sentiment without further deeper linguistic analysis. In order to detect implicit sentiment, we will examine the clues for detecting contrast among product entities or product features for the future work. We will also classify the roles of the clause at a fine-grained level that is related to the detection of the implicit sentiment.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "8"
            }
        ],
        "back_matter": [
            {
                "text": "This work was supported in part by the National Research Foundation of Korea (NRF) grant funded by the Korea government (MEST) (No. 2011-0018262), and in part by the Intelligent Robotics Development Program, one of the 21st Century Frontier R&D Programs funded by the Ministry of Knowledge Economy of Korea. We thank the three anonymous reviewers for helpful comments and insightful suggestions.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgements",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF1": {
                "ref_id": "b1",
                "title": "The Berkeley FrameNet Project",
                "authors": [
                    {
                        "first": "",
                        "middle": [],
                        "last": "Lowe",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "Association for Computational Linguistics",
                "volume": "1",
                "issue": "",
                "pages": "86--90",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Lowe. 1998. The Berkeley FrameNet Project. In Proceedings of COLING, Volume 1, pp. 86-90, Morristown, NJ, USA, Association for Computa- tional Linguistics.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Biographies, Bollywood, Boom-boxes and Blenders: Domain Adaptation for Sentiment Classification",
                "authors": [
                    {
                        "first": "John",
                        "middle": [],
                        "last": "Blitzer",
                        "suffix": ""
                    },
                    {
                        "first": "Mark",
                        "middle": [],
                        "last": "Dredze",
                        "suffix": ""
                    },
                    {
                        "first": "Fernando",
                        "middle": [],
                        "last": "Pereira",
                        "suffix": ""
                    }
                ],
                "year": 2007,
                "venue": "Proceedings of ACL",
                "volume": "",
                "issue": "",
                "pages": "440--447",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "John Blitzer, Mark Dredze, and Fernando Pereira. 2007. Biographies, Bollywood, Boom-boxes and Blenders: Domain Adaptation for Sentiment Clas- sification. In Proceedings of ACL, pp. 440-447, Prague, Association for Computational Linguistics.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Learning with Compositional Semantics as Structural Inference for Subsentential Sentiment Analysis",
                "authors": [
                    {
                        "first": "Yejin",
                        "middle": [],
                        "last": "Choi",
                        "suffix": ""
                    },
                    {
                        "first": "Claire",
                        "middle": [],
                        "last": "Cardie",
                        "suffix": ""
                    }
                ],
                "year": 2008,
                "venue": "Proceedings of HLT/EMNLP",
                "volume": "",
                "issue": "",
                "pages": "793--801",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Yejin Choi and Claire Cardie. 2008. Learning with Compositional Semantics as Structural Inference for Subsentential Sentiment Analysis, In Proceed- ings of HLT/EMNLP, pp. 793-801, Honolulu, Association for Computational Linguistics.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Mining and summarizing customer reviews",
                "authors": [
                    {
                        "first": "Minqing",
                        "middle": [],
                        "last": "Hu",
                        "suffix": ""
                    },
                    {
                        "first": "Bing",
                        "middle": [],
                        "last": "Liu",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Proceedings of ACM SIGKDD",
                "volume": "",
                "issue": "",
                "pages": "168--177",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Minqing Hu and Bing Liu. 2004. Mining and summa- rizing customer reviews, In Proceedings of ACM SIGKDD, pp. 168-177, ACM Press.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Sentiment Classification of Movie and Product Reviews Using Contextual Valence Shifters",
                "authors": [
                    {
                        "first": "Alistair",
                        "middle": [],
                        "last": "Kennedy",
                        "suffix": ""
                    },
                    {
                        "first": "Diana",
                        "middle": [],
                        "last": "Inkpen",
                        "suffix": ""
                    }
                ],
                "year": 2006,
                "venue": "Computational Intelligence",
                "volume": "22",
                "issue": "2",
                "pages": "110--125",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Alistair Kennedy and Diana Inkpen. 2006. Sentiment Classification of Movie and Product Reviews Us- ing Contextual Valence Shifters, Computational Intelligence 22(2):110-125.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Accurate Unlexicalized Parsing",
                "authors": [
                    {
                        "first": "Dan",
                        "middle": [],
                        "last": "Klein",
                        "suffix": ""
                    },
                    {
                        "first": "Christopher",
                        "middle": [
                            "D"
                        ],
                        "last": "Manning",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Proceedings of ACL",
                "volume": "",
                "issue": "",
                "pages": "423--430",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Dan Klein and Christopher D. Manning. 2003. Accu- rate Unlexicalized Parsing. In Proceedings of ACL, pp. 423-430, Sapporo, Japan, Association for Computational Linguistics.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Sentence Level Sentiment Analysis in the Presence of Conjuncts Using Linguistic Analysis",
                "authors": [
                    {
                        "first": "Arun",
                        "middle": [],
                        "last": "Meena",
                        "suffix": ""
                    },
                    {
                        "first": "T",
                        "middle": [
                            "V"
                        ],
                        "last": "Prabhakar",
                        "suffix": ""
                    }
                ],
                "year": 2007,
                "venue": "Proceedings of ECIR 2007",
                "volume": "4425",
                "issue": "",
                "pages": "573--580",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Arun Meena and T.V. Prabhakar. 2007. Sentence Level Sentiment Analysis in the Presence of Con- juncts Using Linguistic Analysis, In Proceedings of ECIR 2007, LNCS 4425, pp. 573-580.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Generating Typed Dependency Parses from Phrase Structure Parses",
                "authors": [
                    {
                        "first": "Marie-Catherine",
                        "middle": [],
                        "last": "De Marneffe",
                        "suffix": ""
                    },
                    {
                        "first": "Bill",
                        "middle": [],
                        "last": "Maccartney",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Christopher",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Manning",
                        "suffix": ""
                    }
                ],
                "year": 2006,
                "venue": "Proceedings of LREC 2006",
                "volume": "",
                "issue": "",
                "pages": "449--454",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Marie-Catherine de Marneffe, Bill MacCartney and Christopher D. Manning. 2006. Generating Typed Dependency Parses from Phrase Structure Parses. In Proceedings of LREC 2006, pp. 449-454.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "WORDNET: A Lexical Database for English",
                "authors": [
                    {
                        "first": "A",
                        "middle": [],
                        "last": "George",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Miller",
                        "suffix": ""
                    }
                ],
                "year": 1995,
                "venue": "Communications of ACM",
                "volume": "",
                "issue": "11",
                "pages": "39--41",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "George A. Miller. 1995. WORDNET: A Lexical Da- tabase for English. Communications of ACM (11): pp. 39-41.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Sentiment Composition",
                "authors": [
                    {
                        "first": "Karo",
                        "middle": [],
                        "last": "Moilanen",
                        "suffix": ""
                    },
                    {
                        "first": "Stephen",
                        "middle": [],
                        "last": "Pulman",
                        "suffix": ""
                    }
                ],
                "year": 2007,
                "venue": "Proceedings of RANLP-2007",
                "volume": "",
                "issue": "",
                "pages": "378--382",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Karo Moilanen and Stephen Pulman. 2007. Sentiment Composition. In Proceedings of RANLP-2007, pp. 378-382, Borovets, Bulgaria.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "Semantically distinct verb classes involved in sentiment analysis",
                "authors": [
                    {
                        "first": "Alena",
                        "middle": [],
                        "last": "Neviarouskaya",
                        "suffix": ""
                    },
                    {
                        "first": "Helmut",
                        "middle": [],
                        "last": "Prendinger",
                        "suffix": ""
                    },
                    {
                        "first": "Mitsuru",
                        "middle": [],
                        "last": "Ishizuka",
                        "suffix": ""
                    }
                ],
                "year": 2009,
                "venue": "Proceedings of IADIS International Conference Applied Computing",
                "volume": "",
                "issue": "",
                "pages": "27--34",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Alena Neviarouskaya, Helmut Prendinger, and Mitsuru Ishizuka. 2009. Semantically distinct verb classes involved in sentiment analysis, In Pro- ceedings of IADIS International Conference Applied Computing, pp. 27-34.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "Contextual valence shifters",
                "authors": [
                    {
                        "first": "Livia",
                        "middle": [],
                        "last": "Polanyi",
                        "suffix": ""
                    },
                    {
                        "first": "Annie",
                        "middle": [],
                        "last": "Zaenen",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Working Notes of the AAAI Spring Symposium on Exploring Attitude and Affect in Text: Theories and Applications",
                "volume": "",
                "issue": "",
                "pages": "106--111",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Livia Polanyi and Annie Zaenen. 2004. Contextual valence shifters, In Working Notes of the AAAI Spring Symposium on Exploring Attitude and Affect in Text: Theories and Applications, pp. 106-111, Menlo Park, CA, The AAAI Press.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "Extracting product features and opinions from reviews",
                "authors": [
                    {
                        "first": "Ana-Maria",
                        "middle": [],
                        "last": "Popescu",
                        "suffix": ""
                    },
                    {
                        "first": "Oren",
                        "middle": [],
                        "last": "Etzioni",
                        "suffix": ""
                    }
                ],
                "year": 2005,
                "venue": "Proceedings of HLT/EMNLP",
                "volume": "",
                "issue": "",
                "pages": "339--346",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Ana-Maria Popescu and Oren Etzioni. 2005. Extract- ing product features and opinions from reviews, In Proceedings of HLT/EMNLP, pp. 339-346,",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "Association for Computational Linguistics",
                "authors": [
                    {
                        "first": "",
                        "middle": [],
                        "last": "Vancouver",
                        "suffix": ""
                    }
                ],
                "year": null,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Vancouver, Association for Computational Lin- guistics.",
                "links": null
            },
            "BIBREF15": {
                "ref_id": "b15",
                "title": "Learning extraction patterns for subjective expressions",
                "authors": [
                    {
                        "first": "Ellen",
                        "middle": [],
                        "last": "Riloff",
                        "suffix": ""
                    },
                    {
                        "first": "Janyce",
                        "middle": [],
                        "last": "Wiebe",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Proceedings of EMNLP",
                "volume": "",
                "issue": "",
                "pages": "105--112",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Ellen Riloff and Janyce Wiebe. 2003. Learning ex- traction patterns for subjective expressions. In Proceedings of EMNLP, pp. 105-112, Sapporo, Japan, Association for Computational Linguistics.",
                "links": null
            },
            "BIBREF16": {
                "ref_id": "b16",
                "title": "Assessing sentiment of text by semantic dependency and contextual valence analysis",
                "authors": [
                    {
                        "first": "Mostafa Al Masum",
                        "middle": [],
                        "last": "Shaikh",
                        "suffix": ""
                    },
                    {
                        "first": "Helmut",
                        "middle": [],
                        "last": "Prendinger",
                        "suffix": ""
                    },
                    {
                        "first": "Ishizuka",
                        "middle": [],
                        "last": "Mitsuru",
                        "suffix": ""
                    },
                    {
                        "first": "I",
                        "middle": [],
                        "last": "",
                        "suffix": ""
                    }
                ],
                "year": 2007,
                "venue": "Proceedings of ACII 2007",
                "volume": "4738",
                "issue": "",
                "pages": "191--202",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Mostafa Al Masum Shaikh, Helmut Prendinger, and Ishizuka Mitsuru, I. 2007. Assessing sentiment of text by semantic dependency and contextual va- lence analysis, In Proceedings of ACII 2007, LNCS 4738, pp. 191-202.",
                "links": null
            },
            "BIBREF17": {
                "ref_id": "b17",
                "title": "Recognizing Contextual Polarity in Phrase-Level Sentiment Analysis",
                "authors": [
                    {
                        "first": "Theresa",
                        "middle": [],
                        "last": "Wilson",
                        "suffix": ""
                    },
                    {
                        "first": "Janyce",
                        "middle": [],
                        "last": "Wiebe",
                        "suffix": ""
                    },
                    {
                        "first": "Paul",
                        "middle": [],
                        "last": "Hoffman",
                        "suffix": ""
                    }
                ],
                "year": 2005,
                "venue": "Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "347--354",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Theresa Wilson, Janyce Wiebe, and Paul Hoffman. 2005. Recognizing Contextual Polarity in Phrase- Level Sentiment Analysis, In Proceedings of HLT/EMNLP, pp. 347-354, Vancouver, Associa- tion for Computational Linguistics.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "text": "Figure 1illustrates this case.",
                "num": null,
                "uris": null,
                "type_str": "figure"
            },
            "FIGREF1": {
                "text": "Basic propagationNonetheless, we found that the polarity should not be propagated in some cases as shown in Example (1a).(1) a. Real 501's are made of 14 oz canvas-like material. b. It's a real 501's.",
                "num": null,
                "uris": null,
                "type_str": "figure"
            }
        }
    }
}