{
"paper_id": "2020",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T07:28:53.659990Z"
},
"title": "Twenty Years of Confusion in Human Evaluation: NLG Needs Evaluation Sheets and Standardised Definitions",
"authors": [
{
"first": "David",
"middle": [
"M"
],
"last": "Howcroft",
"suffix": "",
"affiliation": {
"laboratory": "The Interaction Lab, MACS",
"institution": "Heriot-Watt University",
"location": {
"settlement": "Edinburgh",
"country": "Scotland, UK"
}
},
"email": "d.howcroft@hw.ac.uk"
},
{
"first": "Anya",
"middle": [],
"last": "Belz",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "University of Brighton",
"location": {
"settlement": "Brighton",
"region": "England",
"country": "UK"
}
},
"email": ""
},
{
"first": "Miruna",
"middle": [],
"last": "Clinciu",
"suffix": "",
"affiliation": {
"laboratory": "The Interaction Lab, MACS",
"institution": "Heriot-Watt University",
"location": {
"settlement": "Edinburgh",
"country": "Scotland, UK"
}
},
"email": ""
},
{
"first": "Dimitra",
"middle": [],
"last": "Gkatzia",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Edinburgh Napier University",
"location": {
"settlement": "Edinburgh",
"country": "Scotland, UK"
}
},
"email": ""
},
{
"first": "Sadid",
"middle": [
"A"
],
"last": "Hasan",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "CVS Health",
"location": {
"settlement": "Wellesley",
"region": "MA",
"country": "USA"
}
},
"email": ""
},
{
"first": "Saad",
"middle": [],
"last": "Mahamood",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Simon",
"middle": [],
"last": "Mille",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Universitat Pompeu Fabra",
"location": {
"settlement": "Barcelona",
"country": "Spain"
}
},
"email": ""
},
{
"first": "Emiel",
"middle": [],
"last": "Van Miltenburg",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Tilburg University",
"location": {
"settlement": "Tilburg",
"country": "Netherlands"
}
},
"email": ""
},
{
"first": "Sashank",
"middle": [],
"last": "Santhanam",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "University of North Carolina at Charlotte",
"location": {
"settlement": "Charlotte",
"region": "NC",
"country": "USA"
}
},
"email": ""
},
{
"first": "Verena",
"middle": [],
"last": "Rieser",
"suffix": "",
"affiliation": {
"laboratory": "The Interaction Lab, MACS",
"institution": "Heriot-Watt University",
"location": {
"settlement": "Edinburgh",
"country": "Scotland, UK"
}
},
"email": ""
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "Human assessment remains the most trusted form of evaluation in NLG, but highly diverse approaches and a proliferation of different quality criteria used by researchers make it difficult to compare results and draw conclusions across papers, with adverse implications for meta-evaluation and reproducibility. In this paper, we present (i) our dataset of 165 NLG papers with human evaluations, (ii) the annotation scheme we developed to label the papers for different aspects of evaluations, (iii) quantitative analyses of the annotations, and (iv) a set of recommendations for improving standards in evaluation reporting. We use the annotations as a basis for examining information included in evaluation reports, and levels of consistency in approaches, experimental design and terminology, focusing in particular on the 200+ different terms that have been used for evaluated aspects of quality. We conclude that due to a pervasive lack of clarity in reports and extreme diversity in approaches, human evaluation in NLG presents as extremely confused in 2020, and that the field is in urgent need of standard methods and terminology.",
"pdf_parse": {
"paper_id": "2020",
"_pdf_hash": "",
"abstract": [
{
"text": "Human assessment remains the most trusted form of evaluation in NLG, but highly diverse approaches and a proliferation of different quality criteria used by researchers make it difficult to compare results and draw conclusions across papers, with adverse implications for meta-evaluation and reproducibility. In this paper, we present (i) our dataset of 165 NLG papers with human evaluations, (ii) the annotation scheme we developed to label the papers for different aspects of evaluations, (iii) quantitative analyses of the annotations, and (iv) a set of recommendations for improving standards in evaluation reporting. We use the annotations as a basis for examining information included in evaluation reports, and levels of consistency in approaches, experimental design and terminology, focusing in particular on the 200+ different terms that have been used for evaluated aspects of quality. We conclude that due to a pervasive lack of clarity in reports and extreme diversity in approaches, human evaluation in NLG presents as extremely confused in 2020, and that the field is in urgent need of standard methods and terminology.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Evaluating natural language generation (NLG) systems is notoriously complex: the same input can be expressed in a variety of output texts, each valid in its own context, making evaluation with automatic metrics far more challenging than in other NLP contexts (Novikova et al., 2017; Reiter and Belz, 2009) . Human evaluations are commonly viewed as a more reliable way to evaluate NLG systems (Celikyilmaz et al., 2020; Gatt and Krahmer, 2018) , but come with their own issues, such as cost and time involved, the need for domain expertise (Celikyilmaz et al., 2020) , and the fact that the experimental setup has a substantial impact on the reliability of human quality judgements (Novikova et al., 2018; Santhanam and Shaikh, 2019) .",
"cite_spans": [
{
"start": 259,
"end": 282,
"text": "(Novikova et al., 2017;",
"ref_id": "BIBREF13"
},
{
"start": 283,
"end": 305,
"text": "Reiter and Belz, 2009)",
"ref_id": "BIBREF17"
},
{
"start": 393,
"end": 419,
"text": "(Celikyilmaz et al., 2020;",
"ref_id": "BIBREF4"
},
{
"start": 420,
"end": 443,
"text": "Gatt and Krahmer, 2018)",
"ref_id": "BIBREF7"
},
{
"start": 540,
"end": 566,
"text": "(Celikyilmaz et al., 2020)",
"ref_id": "BIBREF4"
},
{
"start": 682,
"end": 705,
"text": "(Novikova et al., 2018;",
"ref_id": "BIBREF14"
},
{
"start": 706,
"end": 733,
"text": "Santhanam and Shaikh, 2019)",
"ref_id": "BIBREF19"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Moreover, there is little consensus about how human evaluations should be designed and reported. Methods employed and details reported vary widely, issues including missing details (e.g. number of evaluators, outputs evaluated, and ratings collected), lack of proper analysis of results obtained (e.g. effect size and statistical significance), and much variation in names and definitions of evaluated aspects of output quality (van der Lee et al., 2019; Amidei et al., 2018) . However, we currently lack a complete picture of the prevailing consensus, or lack thereof, regarding approaches to human evaluation, experimental design and terminology.",
"cite_spans": [
{
"start": 428,
"end": 454,
"text": "(van der Lee et al., 2019;",
"ref_id": "BIBREF10"
},
{
"start": 455,
"end": 475,
"text": "Amidei et al., 2018)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Our goal in this work, therefore, is to investigate the extent of the above issues and provide a clear picture of the human evaluations NLG currently employs, how they are reported, and in what respects they are in need of improvement. To this end, we examined 20 years of NLG papers that reported some form of human evaluation, capturing key information about the systems, the quality criteria employed, and how these criteria were operationalised in specific experimental designs.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The primary contributions of this paper are (1) an annotation scheme and guidelines for identifying characteristics of human evaluations reported in NLG papers; (2) a dataset containing all 165 INLG/ENLG papers with some form of human evaluation published in 2000-2019, annotated with the scheme, and intended to facilitate future research on this topic; (3) analyses of our dataset and annotations, including analysis of quality criteria used in evaluations, and the similarities and differences between them; and (4) a set of recommendations to help improve clarity in reporting evaluation details.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "We selected papers for inclusion in this study following the PRISMA methodology (Moher et al., 2009) recently introduced to NLP by Reiter (2018) in his structured review of the validity of BLEU.",
"cite_spans": [
{
"start": 80,
"end": 100,
"text": "(Moher et al., 2009)",
"ref_id": "BIBREF12"
},
{
"start": 131,
"end": 144,
"text": "Reiter (2018)",
"ref_id": "BIBREF16"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Paper Selection",
"sec_num": "2"
},
{
"text": "As summarised in Table 1 , we began by considering all 578 papers published at the main SIGGEN venue(s): the International Natural Language Generation Conference (INLG) and the European Workshop on Natural Language Generation (ENLG), which were merged in 2016.",
"cite_spans": [],
"ref_spans": [
{
"start": 17,
"end": 24,
"text": "Table 1",
"ref_id": "TABREF0"
}
],
"eq_spans": [],
"section": "Paper Selection",
"sec_num": "2"
},
{
"text": "While many papers on NLG are published in other venues, including the *ACL conferences, EMNLP, AAAI, IJCAI, etc., focusing on INLG and ENLG provides a simple selection criterion which at the same time ensures a set of papers representative of what researchers specialising in NLG were doing across this time period. We screened the 578 papers looking for mention of a human evaluation, first by skimming for relevant section headings and then by searching in the PDFs for 'human', 'subject', and 'eval'. This left 217 papers.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Paper Selection",
"sec_num": "2"
},
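A minimal sketch of the keyword screening step described in this section, assuming the PDFs have already been converted to plain-text files in a hypothetical papers_txt/ directory; this is illustrative only and not the authors' actual pipeline. Papers containing any of the terms 'human', 'subject', or 'eval' are flagged for manual confirmation.

```python
# Illustrative keyword screening (not the authors' pipeline). Assumes the PDFs
# were already converted to plain text in a hypothetical papers_txt/ directory.
from pathlib import Path

KEYWORDS = ("human", "subject", "eval")  # search terms from Section 2

def likely_has_human_evaluation(text: str) -> bool:
    """Flag a paper if any screening keyword occurs anywhere in its text."""
    lowered = text.lower()
    return any(keyword in lowered for keyword in KEYWORDS)

candidates = [
    path.name
    for path in sorted(Path("papers_txt").glob("*.txt"))
    if likely_has_human_evaluation(path.read_text(encoding="utf-8", errors="ignore"))
]
print(f"{len(candidates)} papers flagged for manual confirmation")
```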
{
"text": "During annotation (Section 3), we retained only papers that reported a human evaluation in the following sense: an experiment involving assessment of system outputs in terms of an explicitly or implicitly given quality criterion, either via (1) conscious assessment of outputs in terms of the criterion by evaluators (e.g. (dis)agreement with quality statement, direct and relative assessment, qualitative feedback); or (2) counts and other measurements of outputs and user interactions with them (e.g. user-text and user-system interaction measurements, task performance measurements).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Paper Selection",
"sec_num": "2"
},
{
"text": "We decided to allow evaluations matching the above conditions even if they did not evaluate system generated texts. This allowed the inclusion of papers which, e.g., assess wizard-of-oz or corpus texts to inform the design of an NLG system. Figure 1 shows the distribution of the 165 papers meeting these conditions across publication years. The general increase of papers with human evaluations since 2012 aligns with the evaluation Stage Source Count 1 INLG / ENLG papers 2000-2019 578 2 Likely with human evaluations 217 3 Confirmed human evals (full dataset) 165 trends found by Gkatzia and Mahamood (2015) , who also reported an increase in the proportion of papers with intrinsic human evaluations between 2012-2015 compared to 2005-2008. However, only 28.54% of the papers in our sample contained a human evaluation compared to 45.4% reported by Gkatzia and Mahamood (2015) .",
"cite_spans": [
{
"start": 592,
"end": 619,
"text": "Gkatzia and Mahamood (2015)",
"ref_id": "BIBREF9"
},
{
"start": 862,
"end": 889,
"text": "Gkatzia and Mahamood (2015)",
"ref_id": "BIBREF9"
}
],
"ref_spans": [
{
"start": 241,
"end": 249,
"text": "Figure 1",
"ref_id": "FIGREF0"
},
{
"start": 434,
"end": 534,
"text": "Stage Source Count 1 INLG / ENLG papers 2000-2019 578 2 Likely with human evaluations 217 3",
"ref_id": "TABREF0"
}
],
"eq_spans": [],
"section": "Paper Selection",
"sec_num": "2"
},
{
"text": "In order to quantitatively study the evaluations in our dataset, we needed a systematic way of collecting information about different aspects of evaluations. Therefore, we developed an annotation scheme to capture different characteristics of evaluations, allowing us to investigate how human evaluations have been designed and reported in NLG over the past two decades, in particular what conventions, similarities and differences have emerged. Below, we summarise our approach to studying aspects of quality assessed in evaluations (Section 3.1), present the final annotation scheme (Section 3.2), describe how we developed it (Section 3.3), and assessed inter-annotator agreement (IAA) (Section 3.4). 1",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Paper Annotation",
"sec_num": "3"
},
{
"text": "Researchers use the same term to describe the aspect of quality they are evaluating with sometimes very different meaning. Annotating (and later analysing) only such terms as are used in our papers would have restricted us to reporting occurrences of the terms, without any idea of where the same thing was in fact evaluated. We would not have been able to report even that, say, Readability is the nth most frequently evaluated aspect of quality, because not all papers in which Readability results are reported mean the same thing by it.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Aspects of quality",
"sec_num": "3.1"
},
{
"text": "We wanted to be able to quantitatively study both usage of terms such as Readability, and the meanings associated with them in different papers. Side-stepping the question of whether there is a single, 'true' concept of say Readability that evaluations could aim to assess, we simply tried to determine, on the basis of all the information provided in a paper, which sets of evaluations assessed aspects of quality similar enough to be considered the same (see Section 3.2.2). This resulted in similarity groups which we assigned normalised names to, yielding a set of common-denominator terms for the distinct aspects of quality that were assessed, regardless of what terms authors used for them.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Aspects of quality",
"sec_num": "3.1"
},
{
"text": "Below we refer to evaluated aspects of quality as quality criteria and the terms used to refer to different criteria as quality criteria names. Any name and definition capturing an aspect of quality can be a quality criterion. We do not wish to imply that there exists a set of 'true' quality criteria, and leave open in this paper the question of how such quality criteria relate to constructs with similar names researched in other fields such as linguistics and psycholinguistics.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Aspects of quality",
"sec_num": "3.1"
},
{
"text": "The annotation scheme consists of seven closedclass and nine open-class attributes that capture different aspects of human evaluation methods and fall into three categories: (1) four System attributes which describe evaluated NLG systems, (2) four Quality criterion attributes which describe the aspect(s) of quality assessed in evaluations, and (3) eight Operationalisation attributes which describe how evaluations are implemented. Definitions and examples for all attributes can be found in the annotation guidelines in the Supplementary Material.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Annotation scheme",
"sec_num": "3.2"
},
{
"text": "The four attributes in this category cover the following properties of systems: language (as per ISO 639-3 (2019)), system input and system output (raw/structured data, deep and shallow linguistic representation, different types of text (sentence, documents etc.)), and task (e.g. data-to-text generation, dialogue turn generation, summarisation).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "System attributes",
"sec_num": "3.2.1"
},
{
"text": "The most challenging aspect of selecting values for the system attributes was the lack of clarity in many papers about inputs/outputs. Where the information was clearly provided, in some cases it proved difficult to decide which of two adjacent attribute values to select; e.g. for system output, single vs. multiple sentences, and for system input, structured data vs. deep linguistic representation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "System attributes",
"sec_num": "3.2.1"
},
{
"text": "The attributes in this category are verbatim criterion name and verbatim criterion definition (both as found in the paper), normalised criterion name (see below), and paraphrased criterion definition (capturing the annotator's best approximation of what was really evaluated in the paper).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quality criterion attributes",
"sec_num": "3.2.2"
},
{
"text": "As mentioned above, to make it possible to report both on usage of quality criterion names, and on similarities and differences between what was really evaluated, we devised a set of normalised quality criterion names that would allow us to see how many distinct quality criteria are currently being used, and relate these to results from our other analyses. The normalised criterion names were determined by performing bottom-up clustering and renaming of values selected for the attributes verbatim criterion definition, paraphrased criterion definition, verbatim question/prompt and paraphrased question/prompt (see Section 3.2.3).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quality criterion attributes",
"sec_num": "3.2.2"
},
{
"text": "We counted 478 occurrences of (verbatim) quality criterion names in papers, mapping to 204 unique names. The clustering and renaming process above produced 71 criterion names which we consider truly distinct and which represent our set of normalised quality criteria. This means that in our analysis, 71 distinct evaluation criteria have been used in the last 20 years in NLG, not 204.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quality criterion attributes",
"sec_num": "3.2.2"
},
{
"text": "Some of the normalised criteria are less specific than others, and can be further specified to yield one of the other criteria, implying hierarchical relationships between some criteria. For example, a criterion might measure the overall Correctness of the Surface Form of a text (less specific), or it might more specifically measure its Grammatical-ity or Spelling Accuracy. Using the classification system for human evaluations proposed by Belz et al. (2020) to provide the top two levels and some branching factors, we developed the hierarchical relationships between quality criteria into a taxonomy to help annotators select values (Appendix E). The set of normalised quality criteria names and definitions is provided in Appendix D.",
"cite_spans": [
{
"start": 443,
"end": 461,
"text": "Belz et al. (2020)",
"ref_id": "BIBREF2"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Quality criterion attributes",
"sec_num": "3.2.2"
},
{
"text": "Common issues we encountered in selecting values for the normalised quality criterion attribute were underspecified or unclear quality criterion definitions in papers, missing definitions (279 out of 478), missing prompts/questions for the evaluators (311/478), and missing criterion names (98/478). The more of this is missing in a paper, the more difficult it is to see beyond the information provided by authors to form a view of what is actually being evaluated, hence to choose a value for the normalised criterion name attribute.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quality criterion attributes",
"sec_num": "3.2.2"
},
{
"text": "The eight attributes in this category record different aspects of how responses are collected in evaluations: the form of response elicitation (direct, vs. relative quality estimation, (dis)agreement with quality statement, etc.), the verbatim question/prompt used in the evaluation and included in the paper, a paraphrased question/prompt for those cases where the paper does not provide the verbatim question/prompt, the data type of the collected responses (categorical, rank order, count, ordinal, etc.), the type of rating instrument from which response variable values are chosen (numerical rating scale, slider scale, verbal descriptor scale, Likert scale, etc.), the size of rating instrument (number of possible response values), the range of response values and any statistics computed for response values.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Operationalisation attributes",
"sec_num": "3.2.3"
},
{
"text": "We found that for most papers, determining the type and size of scale or rating instrument is straightforward, but the large majority of papers do not provide details about the instructions, questions or prompts shown to evaluators; this was doubly problematic because we often relied on such information to determine what was being evaluated.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Operationalisation attributes",
"sec_num": "3.2.3"
},
{
"text": "The annotation scheme was developed in four phases, resulting in four versions of the annotations with two IAA tests (for details of which see Section 3.4), once between the second and third version of the scheme, and once between the third and fourth. From each phase to the next, we tested and subsequently improved the annotation scheme and guidelines. Annotations in all versions were carried out by the first nine authors, in roughly equal proportions.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Annotation scheme development",
"sec_num": "3.3"
},
{
"text": "In the first phase, most of the 165 papers in our final dataset (Table 1) were annotated and then double-checked by two different annotators using a first version of the annotation scheme that did not have formal guidelines.",
"cite_spans": [],
"ref_spans": [
{
"start": 64,
"end": 73,
"text": "(Table 1)",
"ref_id": "TABREF0"
}
],
"eq_spans": [],
"section": "Annotation scheme development",
"sec_num": "3.3"
},
{
"text": "The double-checking revealed considerable differences between annotators, prompting us to formalise the annotation scheme and create detailed instructions, yielding Version 1.0 of the annotation guidelines. IAA tests on new annotations carried out with these guidelines revealed low agreement among annotators (see Table 2 , 1 st IAA test), in particular for some of the attributes we were most interested in, including system task, type of rating instrument, and normalised quality criterion.",
"cite_spans": [],
"ref_spans": [
{
"start": 315,
"end": 322,
"text": "Table 2",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Annotation scheme development",
"sec_num": "3.3"
},
{
"text": "We therefore revised the annotation scheme once more, reducing the number of free-text attributes, and introducing automated consistency checking and attribute value suggestions. Using the resulting V2.0 scheme and guidelines, we re-annotated 80 of the papers, this time pairing up annotators for the purpose of agreeing consensus annotations. We computed, and Table 2 reports, three sets of IAA scores on the V2.0 annotations: for all nine annotators separately ('9 solo'), for the 4 consensus annotations ('4 duo'), and for the 5 annotators whose solo annotations agreed most with everyone else's, shown in the '5 best' column. There was an overall improvement in agreement (substantial in the case of some attributes), but we decided to carry out one final set of improvements to definitions and instructions in the annotation guidelines (with minimal changes to attribute names and values), yielding version 2.1 which was then used for the final annotation of all 165 papers in our dataset, on which all analyses in this paper are based.",
"cite_spans": [],
"ref_spans": [
{
"start": 361,
"end": 368,
"text": "Table 2",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Annotation scheme development",
"sec_num": "3.3"
},
{
"text": "Papers for IAA tests: For each IAA test we manually selected a different arbitrary set of 10 NLG papers with human evaluations from ACL 2020.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Inter-Annotator Agreement",
"sec_num": "3.4"
},
{
"text": "Preprocessing: We cleaned up attribute values selected by annotators by normalising spelling, punctuation, and capitalisation. For the first annotation round which allowed empty cells, we replaced those with 'blank.' We also removed papers not meeting the conditions from Section 2.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Inter-Annotator Agreement",
"sec_num": "3.4"
},
{
"text": "Calculating agreement: The data resulting from annotation was a 10 (papers) \u00d7 n (quality criteria identified by annotator in paper) \u00d7 16 (attribute value pairs) data frame, for each of the annotators. The task for IAA assessment was to measure the agreement across multiple data frames (one for each annotator) allowing for different numbers of criteria being identified by different authors.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Inter-Annotator Agreement",
"sec_num": "3.4"
},
{
"text": "We did this by calculating Krippendorff's alpha using Jaccard for the distance measure (recommended by Artstein and Poesio 2008) . Scores for the seven closed-class attributes are shown in Table 2 for each of the two IAA tests (column headings as explained in the preceding section).",
"cite_spans": [
{
"start": 103,
"end": 128,
"text": "Artstein and Poesio 2008)",
"ref_id": "BIBREF1"
}
],
"ref_spans": [
{
"start": 189,
"end": 196,
"text": "Table 2",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Inter-Annotator Agreement",
"sec_num": "3.4"
},
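As a rough illustration of this agreement computation (not the authors' released code), NLTK's AnnotationTask accepts a custom distance function, so Krippendorff's alpha with a Jaccard distance over sets of selected attribute values can be computed as below; the annotator, item, and value names are hypothetical.

```python
# Sketch only: Krippendorff's alpha with Jaccard distance over sets of
# attribute values, in the spirit of Section 3.4 (not the authors' code).
from nltk.metrics.agreement import AnnotationTask
from nltk.metrics.distance import jaccard_distance

# Each record is (annotator_id, item_id, frozenset of selected values), where
# item_id identifies a paper/attribute cell; all identifiers are hypothetical.
records = [
    ("a1", "paper03:task", frozenset({"data-to-text generation"})),
    ("a2", "paper03:task", frozenset({"data-to-text generation"})),
    ("a1", "paper03:criterion", frozenset({"Fluency", "Grammaticality"})),
    ("a2", "paper03:criterion", frozenset({"Grammaticality"})),
]

task = AnnotationTask(data=records, distance=jaccard_distance)
print(f"Krippendorff's alpha (Jaccard distance): {task.alpha():.3f}")
```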
{
"text": "The consensus annotations ('duo') required pairs of annotators to reach agreement about selected attribute values. This reduced disagreement and improved consistency with the guidelines, the time it took was prohibitive.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Inter-Annotator Agreement",
"sec_num": "3.4"
},
{
"text": "For the attributes task, data type, and type of rating instrument (shortened to 'instrument' in the table), we consider the '5 best' IAA to be very good (0 indicating chance-level agreement). For system input and output, IAA is still good, with the main source of disagreement the lack of clarity about text size/type in textual inputs/outputs. Replacing the different text size/type values with a single 'text' value improves IAA to 0.41 and 1.00 for inputs and outputs, respectively. The remaining issues for inputs are to do with multiple inputs and distinguishing structured data from deep linguistic representations, which prompted us to merge the two data input types.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Inter-Annotator Agreement",
"sec_num": "3.4"
},
{
"text": "Low agreement for normalised quality criteria is in part due to the lack of clear information about what aspect of quality is being assessed in papers, and the difficulty of distinguishing quality criteria from evaluation modes (see previous section). But cases where annotators mapped a single criterion name in the paper to multiple normalised criterion names were also a big factor because this substantially raises the bar for agreement.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Inter-Annotator Agreement",
"sec_num": "3.4"
},
{
"text": "In this section, we present results from analyses performed on the annotations of the 165 papers in our dataset. The dataset and code for analysis are available in the project repository.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Analysis and Results",
"sec_num": "4"
},
{
"text": "The 165 papers in the dataset correspond to 478 individual evaluations assessing single quality criteria, i.e. 2.8 per paper. For the quality criterion attributes (Section 3.2.2) and the operationalisation attributes (Section 3.2.3) it makes most sense to compute occurrence counts on the 478 individual evaluations, even if that slightly inflates counts in some cases. For example, if multiple criteria are evaluated in the same experiment, should we really count multiple occurrences for every operationalisation attribute? But the alternatives are to either count per paper, leaving the question of what to do about multiple experiments in the same paper, or to count per experiment, leaving the problem of variation within the same experiment and also that it is not always clear whether separate experiments were carried out. For these reasons we opted to compute statistics at the individual-evaluation level for the quality-criterion and operationalisation attributes.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Analysis and Results",
"sec_num": "4"
},
{
"text": "For the system attributes (Section 3.2.1), we report paper-level statistics. We do sometimes find more than one system type (with different language, input, output or task) being evaluated in a paper, but for those cases we add all attribute values found for the paper. Below we first report paper-level statistics for the system attributes (Section 4.1), followed by evaluation-level statistics for quality-criterion and operationalisation attributes (Section 4.2).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Analysis and Results",
"sec_num": "4"
},
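The counting convention described above can be made concrete with a small sketch (illustrative only; the field names and values are hypothetical): operationalisation and quality-criterion attributes are counted once per individual evaluation, while system attributes are counted once per paper after taking the union of the values found in that paper.

```python
# Illustrative counting sketch (not the authors' analysis code).
from collections import Counter, defaultdict

# One record per individual evaluation (478 in the real dataset); hypothetical values.
evaluations = [
    {"paper": "P1", "task": "data-to-text generation", "elicitation": "direct quality estimation"},
    {"paper": "P1", "task": "data-to-text generation", "elicitation": "relative quality estimation"},
    {"paper": "P2", "task": "summarisation", "elicitation": "direct quality estimation"},
]

# Evaluation-level statistics: one count per individual evaluation.
elicitation_counts = Counter(e["elicitation"] for e in evaluations)

# Paper-level statistics: each attribute value counts at most once per paper.
tasks_per_paper = defaultdict(set)
for e in evaluations:
    tasks_per_paper[e["paper"]].add(e["task"])
task_counts = Counter(task for tasks in tasks_per_paper.values() for task in tasks)

print(elicitation_counts)
print(task_counts)
```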
{
"text": "Unsurprisingly, our analysis shows that the most frequent system language in our dataset is English, accounting for 82.14% of papers pre-2010, and 75.39% post-2010. Appendix A provides a detailed overview of results for this attribute.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Paper-level statistics",
"sec_num": "4.1"
},
{
"text": "In terms of the system task attribute, our analysis reveals that before 2010, data-to-text generation and dialogue turn generation were the most common tasks, whereas post-2010 the most common tasks are data-to-text generation, summarisation and dialogue turn generation. The biggest increases are for question generation (0 pre-2010, 9 post-2010), end-to-end generation (1 increasing to 8), and summarisation (1 going up to 11). 2 For the system output attribute, we found that a big majority of systems output single or multiple sentences. Appendix B and C show task and output frequencies in more detail. Table 3 provides an overview of the most frequent values selected for the form of response elicitation attribute. We found that direct quality estimation where outputs are scored directly one at a time, was most common (207 times), followed by relative quality estimation where multiple outputs are ranked (72 times). 3 To select values for this criterion, we relied on a combination of descriptions of the general experimental design, prompts/questions and instructions given to evaluators. We found that instructions to evaluators were almost never provided, example prompts/questions rarely, and even details of rating scales etc. were often missing.",
"cite_spans": [
{
"start": 926,
"end": 927,
"text": "3",
"ref_id": null
}
],
"ref_spans": [
{
"start": 608,
"end": 615,
"text": "Table 3",
"ref_id": "TABREF4"
}
],
"eq_spans": [],
"section": "Paper-level statistics",
"sec_num": "4.1"
},
{
"text": "What was usually clear was the type of scale or other rating instrument and its size and labels. From this, values for other operationalisation attributes such as form of response elicitation, data type of collected responses and range of response values could usually be deduced, but as can be seen Figure 2 : How many papers explicitly name and define all, some, or none of the quality criteria they evaluate.",
"cite_spans": [],
"ref_spans": [
{
"start": 300,
"end": 308,
"text": "Figure 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "Operationalisation attributes",
"sec_num": "4.2.1"
},
{
"text": "from Table 3 , for 15 individual evaluations (5 papers) even the response elicitation methods were unclear.",
"cite_spans": [],
"ref_spans": [
{
"start": 5,
"end": 12,
"text": "Table 3",
"ref_id": "TABREF4"
}
],
"eq_spans": [],
"section": "Operationalisation attributes",
"sec_num": "4.2.1"
},
{
"text": "In this section, our aim is to look at the criterion names and definitions as given in papers, and how they mapped to the normalised criterion names. As shown in Figure 2 at the paper level, not all papers name their quality criteria and worryingly, just over half give no definitions for any of their quality criteria. As noted in Section 3, where explicit criterion names and/or definitions were missing in papers, we used the remaining information provided in the paper to determine which aspect of quality was evaluated, and mapped this to our set of normalised quality criteria. Table 4 shows how often each normalised criterion occurs in our annotations of the 478 individual evaluations in the dataset. We can see that Usefulness for task/information need, Grammaticality, and Quality of outputs are the most frequently occurring normalised quality criterion names. Fluency which is one of the most frequent criterion names found in papers, ranks only (joint) seventh. Table 5 shows 10 example criterion names as used in papers, and how we mapped them to our normalised criterion names. For example, Fluency was mapped to 15 different (sets of) normalised names (reflecting what was actually evaluated), including many cases where multiple normalised criterion names were selected (indicated by the prefix 'multiple (n)').",
"cite_spans": [],
"ref_spans": [
{
"start": 162,
"end": 170,
"text": "Figure 2",
"ref_id": null
},
{
"start": 584,
"end": 591,
"text": "Table 4",
"ref_id": "TABREF6"
},
{
"start": 976,
"end": 983,
"text": "Table 5",
"ref_id": "TABREF7"
}
],
"eq_spans": [],
"section": "Quality Criterion Names & Definitions",
"sec_num": "4.2.2"
},
{
"text": "It is not straightforward to interpret the information presented in Table 5 . Objectively, what it shows is that we chose a much larger number of quality criteria to map certain original quality criteria names to than others. Fluency has been mapped to by far the largest number of different normalised criteria. This in turn means that there was the largest amount of variation in how different authors defined and operationalised Fluency (because we determined the normalised criteria on the basis of similarity groups of original criteria). In other words, the papers that used Fluency divided into 15 subsets each with a distinct understanding of Fluency shared by members of the subset. 15 is a large number in this context and indicates a high level of disagreement, in particular combined with the presence of many multiple sets. Conversely, a criterion like Clarity has a high level of agreement (despite also being high frequency as shown in Table 4 ). Figure 3 shows a graphical representation of some of our mappings from original to normalised quality criteria in the form of a Sankey diagram, and illustrates the complexity of the correspondences between the two.",
"cite_spans": [],
"ref_spans": [
{
"start": 68,
"end": 75,
"text": "Table 5",
"ref_id": "TABREF7"
},
{
"start": 951,
"end": 958,
"text": "Table 4",
"ref_id": "TABREF6"
},
{
"start": 962,
"end": 970,
"text": "Figure 3",
"ref_id": "FIGREF2"
}
],
"eq_spans": [],
"section": "Quality Criterion Names & Definitions",
"sec_num": "4.2.2"
},
{
"text": "Prompts and questions put to evaluators (e.g. how well does this text read?) often try to explain the aspect of quality that evaluators are supposed to be evaluating using descriptors other than the criterion name, and can end up explaining one criterion in terms of one or more others (e.g. for Fluency, how grammatical and readable is this text?). We found fifty cases where the prompt/question references multiple normalised criteria (two and more), with a mean of 2.48 (min = 2, max = 4, median = 2, stdev = 0.64). Table 6 lists pairs of criteria referenced in the same prompt/question, ordered by pair-level frequency. For example, there were four prompts/questions that referenced both Fluency and Grammaticality. There is evidence that questions combining multiple quality criteria cause more variation in the responses, because different participants may weigh the importance of one of the quality criteria differently in their response; such complex quality criteria may best be measured using multiple items rather than a single question (van der Lee et al., 2019).",
"cite_spans": [],
"ref_spans": [
{
"start": 519,
"end": 526,
"text": "Table 6",
"ref_id": "TABREF9"
}
],
"eq_spans": [],
"section": "Prompts/questions put to evaluators",
"sec_num": "4.2.3"
},
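The pair-level counts and summary statistics reported here can be reproduced from per-prompt sets of normalised criteria with a few lines of standard-library code; the sketch below is illustrative only, and the prompt identifiers and criterion sets are hypothetical.

```python
# Illustrative sketch (not the authors' code): summary statistics and
# pair-level co-occurrence counts for prompts referencing multiple criteria.
from collections import Counter
from itertools import combinations
from statistics import mean, median, stdev

# Hypothetical mapping from prompt id to the normalised criteria it references.
prompt_criteria = {
    "p1": {"Fluency", "Grammaticality"},
    "p2": {"Clarity", "Readability", "Coherence"},
    "p3": {"Fluency", "Grammaticality"},
}

multi = [crits for crits in prompt_criteria.values() if len(crits) >= 2]
sizes = [len(crits) for crits in multi]
print(f"{len(multi)} multi-criterion prompts: mean={mean(sizes):.2f}, "
      f"min={min(sizes)}, max={max(sizes)}, median={median(sizes)}, stdev={stdev(sizes):.2f}")

# Pair-level frequencies (cf. Table 6): every unordered pair of criteria
# referenced in the same prompt/question.
pair_counts = Counter(pair for crits in multi for pair in combinations(sorted(crits), 2))
for pair, count in pair_counts.most_common():
    print(pair, count)
```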
{
"text": "Perhaps the most compelling evidence we found in our analyses in this paper is that (i) there is very little shared practice in human evaluation in NLG, in particular with respect to what to name the aspects of quality we wish to evaluate, and how to define them; and (ii) the information presented in NLG papers about human evaluations is very rarely complete. The latter can be addressed through better reporting in future work (see below). The former is far less straightforward to address. One key observation from our data is that the same quality criterion names are often used by different authors to refer to very different aspects of quality, and that different names often refer to the same aspect of quality. We further found that more than half of the papers failed to define the criteria they evaluated, and about a quarter omitted to name the criteria being evaluated.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion & Recommendations",
"sec_num": "5"
},
{
"text": "Our analysis has emphasised the need for better reporting of details of evaluations in order to help readers understand what aspect of quality is being evaluated and how. It took the first nine authors of the paper 25-30 minutes on average even in the final round of annotations to annotate a single paper, a measure of how hard it currently is to locate information about evaluations in papers.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion & Recommendations",
"sec_num": "5"
},
{
"text": "Based on this experience we have put together a list of what we see as reporting recommendations for human evaluations presented in Table 7 . The aim is to provide authors with a simple list of what information to include in reports of human evaluations at a minimum. The next step will be to develop the recommendations in Table 7 into a Human Evaluation Checklist giving full details of what to include in reports of human evaluation experiments, to complement existing recommendations for datasets and machine learning models, their intended uses, and potential abuses (Bender and Friedman, 2018; Gebru et al., 2018; Mitchell et al., 2019; Pineau, 2020; Ribeiro et al., 2020) , aimed at making \"critical information accessible that previously could only be found by users with great effort\" (Bender and Friedman, 2018) .",
"cite_spans": [
{
"start": 572,
"end": 599,
"text": "(Bender and Friedman, 2018;",
"ref_id": "BIBREF3"
},
{
"start": 600,
"end": 619,
"text": "Gebru et al., 2018;",
"ref_id": "BIBREF8"
},
{
"start": 620,
"end": 642,
"text": "Mitchell et al., 2019;",
"ref_id": "BIBREF11"
},
{
"start": 643,
"end": 656,
"text": "Pineau, 2020;",
"ref_id": "BIBREF15"
},
{
"start": 657,
"end": 678,
"text": "Ribeiro et al., 2020)",
"ref_id": "BIBREF18"
},
{
"start": 794,
"end": 821,
"text": "(Bender and Friedman, 2018)",
"ref_id": "BIBREF3"
}
],
"ref_spans": [
{
"start": 132,
"end": 139,
"text": "Table 7",
"ref_id": "TABREF10"
},
{
"start": 324,
"end": 331,
"text": "Table 7",
"ref_id": "TABREF10"
}
],
"eq_spans": [],
"section": "Discussion & Recommendations",
"sec_num": "5"
},
{
"text": "We have presented our new dataset of 165 papers each annotated with 16 attribute values that encode different aspects of the human evaluations reported in them. We described the carefully developed and validated annotation scheme we created for this How do you define that quality criterion? Provide a definition for your criterion. It is okay to cite another paper for the definition; however, it should be easy for your readers to figure out what aspects of the text you wanted to evaluate.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "6"
},
{
"text": "How are you collecting responses? Direct ratings, post-edits, surveys, observation? Rankings or rating scales with numbers or verbal descriptors? Provide the full prompt or question with the set of possible response values where applicable, e.g. when using Likert scales. instructions, prompts, and questions What are your participants responding to? Following instructions, answering a question, agreeing with a statement? The exact text you give your participants is important for anyone trying to replicate your experiments. In addition to the immediate task instructions, question or prompt, provide the full set of instructions as part of your experimental design materials in an appendix. purpose, and reported analyses and visualisations over the annotations.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "OPERATIONALISATION instrument type",
"sec_num": null
},
{
"text": "Our analyses shed light on the kinds of evaluations NLG researchers have conducted and reported over the past 20 years. We have found a very high level of diversity of approaches, and fundamental gaps in reported details, including missing definitions of the aspect of quality being evaluated in about two-thirds of papers, and absence of basic details such as language, system input/output, etc.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "OPERATIONALISATION instrument type",
"sec_num": null
},
{
"text": "We have proposed normalised quality criteria names and definitions to help us understand which evaluations actually evaluate the same thing. These are not intended as a set of standardised evaluation criteria that can be taken off the shelf and used. Rather, they are a first step in that direction. For a standardised set it would be desirable to ground evaluation criteria in related and much researched constructs in other fields. For example, there is a long history of studying readability (Chall, 1958; De Clercq et al., 2014) .",
"cite_spans": [
{
"start": 495,
"end": 508,
"text": "(Chall, 1958;",
"ref_id": "BIBREF5"
},
{
"start": 509,
"end": 532,
"text": "De Clercq et al., 2014)",
"ref_id": "BIBREF6"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "OPERATIONALISATION instrument type",
"sec_num": null
},
{
"text": "Our single main conclusion is that, as a field, we need to standardise experimental design and terminology, so as to make it easier to understand and compare the human evaluations we perform. English 46 95 141 German 2 5 Japanese 4 3 7 Spanish 1 3 4 Chinese 1 3 4 Dutch 1 3 4 Other (13 languages) 1 14 15 ",
"cite_spans": [],
"ref_spans": [
{
"start": 192,
"end": 320,
"text": "English 46 95 141 German 2 5 Japanese 4 3 7 Spanish 1 3 4 Chinese 1 3 4 Dutch 1 3 4 Other (13 languages)",
"ref_id": "TABREF0"
}
],
"eq_spans": [],
"section": "OPERATIONALISATION instrument type",
"sec_num": null
},
{
"text": "Answerability from input: The degree to which an output (typically a question or problem) can be answered or solved with content/information from the input.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "D Alphabetical list of quality criterion names and definitions",
"sec_num": null
},
{
"text": "Appropriateness: The degree to which the output is appropriate in the given context/situation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "D Alphabetical list of quality criterion names and definitions",
"sec_num": null
},
{
"text": "Appropriateness (both form and content): The degree to which the output as a whole is appropriate in the given context/situation. E.g. \"does the text appropriately consider the parents' emotional state in the given scenario?\"",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "D Alphabetical list of quality criterion names and definitions",
"sec_num": null
},
{
"text": "Appropriateness (content): The degree to which the content of the output is appropriate in the given context/situation. E.g. \"is the question coherent with other generated questions?\"",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "D Alphabetical list of quality criterion names and definitions",
"sec_num": null
},
{
"text": "Appropriateness (form): The degree to which the form of the output is appropriate in the given context/situation. E.g. \"are the lexical choices appropriate given the target reader?\"",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "D Alphabetical list of quality criterion names and definitions",
"sec_num": null
},
{
"text": "Clarity: The degree to which the meaning of an output is absorbed without effort, i.e. is easy to understand as well as possible to understand.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "D Alphabetical list of quality criterion names and definitions",
"sec_num": null
},
{
"text": "Coherence: The degree to which the content/meaning of an output is presented in a well-structured, logical and meaningful way. E.g. \"does the generated text accord with the correct logic?\"",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "D Alphabetical list of quality criterion names and definitions",
"sec_num": null
},
{
"text": "Cohesion: The degree to which the different parts of an output form a cohesive whole. Cohesion is the grammatical and lexical linking within a text or sentence that holds a text together and gives it meaning.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "D Alphabetical list of quality criterion names and definitions",
"sec_num": null
},
{
"text": "The degree to which outputs are correct. Evaluations of this type ask in effect 'Is this output correct?' with criteria in child nodes adding more detail.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Correctness of outputs:",
"sec_num": null
},
{
"text": "Correctness of outputs in their own right: The degree to which an output is correct/accurate/true, looking only at the output.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Correctness of outputs:",
"sec_num": null
},
{
"text": "Correctness of outputs in their own right (both form and content): The degree to which both the form and content of an output are correct, looking only at the output.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Correctness of outputs:",
"sec_num": null
},
{
"text": "Correctness of outputs in their own right (content): The degree to which the content of an output is correct, looking only at the output. E.g. \"is this dictionary reference semantically complete?\" (best = no further info needed).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Correctness of outputs:",
"sec_num": null
},
{
"text": "Correctness of outputs in their own right (form): The degree to which the form of an output is correct, looking only at the output.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Correctness of outputs:",
"sec_num": null
},
{
"text": "Correctness of outputs relative to external frame of reference: The degree to which an output is correct/accurate/true relative to a system-external frame of reference.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Correctness of outputs:",
"sec_num": null
},
{
"text": "Correctness of outputs relative to external frame of reference (both form and content) : The degree to which the form and content of an output is correct/accurate/true relative to a system-external frame of reference.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Correctness of outputs:",
"sec_num": null
},
{
"text": "Correctness of outputs relative to external frame of reference (content): The degree to which the content of an output is correct/accurate/true relative to a system-external frame of reference. E.g. \"are the contents of the text factually true?\" (best = no untrue facts).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Correctness of outputs:",
"sec_num": null
},
{
"text": "The degree to which the form of an output is correct/accurate/true relative to a system-external frame of reference. E.g. \"does the generated question use correct named entity names as given in this database?\" (best = all as in database).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Correctness of outputs relative to external frame of reference (form):",
"sec_num": null
},
{
"text": "Correctness of outputs relative to input: The degree to which an output is correct/accurate/true relative to the input.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Correctness of outputs relative to external frame of reference (form):",
"sec_num": null
},
{
"text": "Correctness of outputs relative to input (both form and content): The degree to which the form and content of an output is correct/accurate/true relative to the input.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Correctness of outputs relative to external frame of reference (form):",
"sec_num": null
},
{
"text": "Correctness of outputs relative to input (content): The degree to which the content of an output is correct/accurate/true relative to the input. E.g. \"is all the meaning of the input preserved?\", \"to what extent does the generated text convey the information in the input table?\" (best = all the information).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Correctness of outputs relative to external frame of reference (form):",
"sec_num": null
},
{
"text": "Correctness of outputs relative to input (form): The degree to which the form of an output is correct/accurate/true relative to the input. E.g. \" how similar are the words to the input?\" (best = same).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Correctness of outputs relative to external frame of reference (form):",
"sec_num": null
},
{
"text": "Detectability of controlled feature [PROPERTY]: The degree to which a property that the outputs are intended to have (i.e. because it's controlled by input to the generation process) is detectable in the output. Open class criterion; PROPERTY can be a wide variety of different things, e.g. conversational, meaningful, poetic, vague/specific, etc.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Correctness of outputs relative to external frame of reference (form):",
"sec_num": null
},
{
"text": "The degree to which the outputs make communication easy, typically in a dialogue situation. E.g. \"how smoothly did the conversation go with the virtual agent?\"",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Ease of communication:",
"sec_num": null
},
{
"text": "Effect on reader/listener [EFFECT]: The degree to which an output has an EFFECT in the listener/reader. Open class criterion; EFFECT can be a wide variety of different things, e.g. inducing a specific emotional state, inducing behaviour change, etc. E.g. measuring how much the user learnt from reading the output; \"are you feeling sad after reading the text?\"",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Ease of communication:",
"sec_num": null
},
{
"text": "Fluency: The degree to which a text 'flows well' and is not e.g. a sequence of unconnected parts.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Ease of communication:",
"sec_num": null
},
{
"text": "Goodness as system explanation: Degree to which an output is satisfactory as an explanation of system behaviour. E.g. \"does the text provide an explanation that helps users understand the decision the system has come to?\"",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Ease of communication:",
"sec_num": null
},
{
"text": "Goodness of outputs (excluding correctness): The degree to which outputs are good. Evaluations of this type ask in effect 'Is this output good?' with criteria in child nodes adding more detail.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Ease of communication:",
"sec_num": null
},
{
"text": "Goodness of outputs in their own right: The degree to which an output is good, looking only at the output.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Ease of communication:",
"sec_num": null
},
{
"text": "Goodness of outputs in their own right (both form and content): The degree to which the form and content of an output are good, looking only at the output.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Ease of communication:",
"sec_num": null
},
{
"text": "Goodness of outputs in their own right (content): The degree to which the content of an output is good, looking only at the output.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Ease of communication:",
"sec_num": null
},
{
"text": "Goodness of outputs in their own right (form): The degree to which the form of an output is good, looking only at the output. E.g. \"is the generated response a complete sentence?\"",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Ease of communication:",
"sec_num": null
},
{
"text": "Goodness of outputs relative to external frame of reference: The degree to which an output is good relative to a system-external frame of reference.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Ease of communication:",
"sec_num": null
},
{
"text": "Goodness of outputs relative to grounding: The degree to which an output is good relative to grounding in another modality and/or real-world or virtual-world objects as a frame of reference.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Ease of communication:",
"sec_num": null
},
{
"text": "The degree to which an output is good relative to human language use as a frame of reference.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Goodness of outputs relative to input: The degree to which an output is good relative to the input.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Goodness of outputs relative to input (both form and content): The degree to which the form and content of an output is good relative to the input. E.g. \"does the output text reflect the input topic labels?\"",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Goodness of outputs relative to input (content): The degree to which an output is good relative to the input. E.g. \"does the output text include the important content from inputs?\"",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Goodness of outputs relative to input (form): The degree to which the form of an output is good relative to the input. E.g. in paraphrasing: \"is the surface form of the output different enough from that of the input?\"",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Goodness of outputs relative to linguistic context in which they are read/heard: The degree to which an output is good relative to linguistic context as a frame of reference.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Goodness of outputs relative to system use: The degree to which an output is good relative to system use as a frame of reference.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Grammaticality: The degree to which an output is free of grammatical errors.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Humanlikeness: The degree to which an output could have been produced by a human.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Humanlikeness (both form and content): The degree to which the form and content of an output could have been produced/chosen by a human.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Humanlikeness (content): The degree to which the content of an output could have been chosen by a human (irrespective of quality of form).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Humanlikeness (form): The degree to which the form of an output could have been produced by a human (irrespective of quality of content).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Inferrability of speaker/author stance [OBJECT]: The degree to which the speaker's/author's stance towards an OB-JECT is inferrable from the text. E.g. \"rank these texts in order of positivity expressed towards the company.\"",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Inferrability of speaker/author trait [TRAIT]: The degree to which it is inferrable from the output whether the speaker/author has a TRAIT. Open-class criterion; TRAIT can be a wide variety of different things, e.g. personality type, identity of author/speaker, etc. E.g. \"who among the writers of these texts do you think is the most conscientious?\"",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Information content of outputs: The amount of information conveyed by an output. Can range from 'too much' to 'not enough', or 'very little' to 'a lot'. E.g. \"is the general level of details provided in the text satisfactory?\", \"do you personally find the amount of information in the text optimal?\"",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Multiple (list all): use only if authors use single criterion name which corresponds to more than one criterion name in the above list. Include list of corresponding criteria in brackets.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Naturalness: The degree to which the output is likely to be used by a native speaker in the given context/situation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Naturalness (both form and content): The degree to which the form and content of an output is likely to be produced/chosen by a native speaker in the given context/situation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Naturalness (content): The degree to which the content of an output is likely to be chosen by a native speaker in the given context/situation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Naturalness (form): The degree to which the form of an output is likely to be produced by a native speaker in the given context/situation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Nonredundancy (both form and content): The degree to which the form and content of an output are free of redundant elements, such as repetition, overspecificity, etc.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Nonredundancy (content): The degree to which the content of an output is free of redundant elements, such as repetition, overspecificity, etc.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Nonredundancy (form): The degree to which the form of an output is free of redundant elements, such as repetition, overspecificity, etc.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Quality of outputs: Maximally underspecified quality criterion. E.g. when participants are asked which of a set of alternative outputs they prefer (with no further details).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Readability: The degree to which an output is easy to read, the reader not having to look back and reread earlier text.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Referent resolvability: The degree to which the referents of the referring expressions in an output can be identified.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Speech quality: The degree to which the speech is of good quality in spoken outputs.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Spelling accuracy: The degree to which an output is free of spelling errors.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Text Property [PROPERTY]: The degree to which an output has a specific property (excluding features controlled by an input parameter). Open class criterion; PROPERTY could be a wide variety of different things: conversational, informative, etc. E.g. \"does the text have the characteristics of a poem?\"",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Text Property [Complexity/simplicity]: The degree to which an output is complex/simple.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Text Property [Complexity/simplicity (both form and content)]: The degree to which an output as a whole is complex/simple.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Text Property [Complexity/simplicity (content)]: The degree to which an output conveys complex/simple content/meaning/information. E.g. \"does the generated question involve reasoning over multiple sentences from the document?\"",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Text Property [Complexity/simplicity (form)]: The degree to which an output is expressed in complex/simple terms. E.g.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "\"does the generated text contain a lot of technical or specialist words?\"",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Understandability: Degree to which the meaning of an output can be understood.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Usability: The degree to which the system in the context of which outputs are generated is usable. E.g. user-system interaction measurements, or direct usability ratings for the system.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Usefulness (nonspecific): The degree to which an output is useful. E.g. measuring task success, or questions like \"did you find the system advice useful?\"",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Usefulness for task/information need: The degree to which an output is useful for a given task or information need. E.g. \"does the description help you to select an area for buying a house?\"",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "User satisfaction: The degree to which users are satisfied with the system in the context of which outputs are generated. E.g. in a dialogue system \"how satisfied were you with the booking you just made?\"",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "Wellorderedness: The degree to which the content of an output is well organised and presents information in the right order.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "E Taxonomy of Quality Criteria Figure 4 shows the 71 quality criteria (plus some filler nodes, in grey) structured hierarchically into a taxonomy. For the top three levels of branches in the taxonomy we used the quality criterion properties from Belz et al. (2020) : (i) goodness vs. correctness vs. features; (ii) quality of output in its own right vs. quality of output relative to input vs. quality of output relative to an external frame of reference (yellow, red, orange); (iii) form of output vs. content of output vs. both form and content of output (green, blue, purple). Note that the taxonomy is not necessarily complete in this state; it contains all and only those 71 distinct criteria that resulted from our survey. ",
"cite_spans": [
{
"start": 246,
"end": 264,
"text": "Belz et al. (2020)",
"ref_id": "BIBREF2"
}
],
"ref_spans": [
{
"start": 31,
"end": 39,
"text": "Figure 4",
"ref_id": "FIGREF5"
}
],
"eq_spans": [],
"section": "Goodness of outputs relative to how humans use language:",
"sec_num": null
},
{
"text": "The dataset of annotated PDFs, annotation spreadsheet, annotation scheme, code, and guidelines resulting from the work are available in the project repository: https:// evalgenchal.github.io/20Y-CHEC/",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "The increase in summarisation may be due to an increase in summarisation papers submitted to INLG, the increase in end-to-end generation in part to changing terminology.3 For explanations of attribute values see annotation guidelines in Supplementary Material.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [
{
"text": "Howcroft and Rieser's contributions were supported under EPSRC project MaDrIgAL (EP/N017536/1). Gkatzia's contribution was supported under the EPSRC project CiViL (EP/T014598/1).Mille's contribution was supported by the European Commission under the H2020 contracts 870930-RIA, 779962-RIA, 825079-RIA, 786731-RIA.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acknowledgments",
"sec_num": null
},
{
"text": "EFFECT = { learns, is interested, changes behaviour, feels entertained, is amused, is engaged, feels in a specific emotional state... }",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Effect on reader/listener [EFFECT]",
"sec_num": null
},
{
"text": "OBJECT = { person, policy, product, team, topic, ... }",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Inferrability of speaker/author stance [OBJECT]",
"sec_num": null
},
{
"text": "TRAIT = { personality type, identity of author/speaker, ... }",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Inferrability of speaker/author trait [TRAIT]",
"sec_num": null
}
],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Evaluation methodologies in automatic question generation",
"authors": [
{
"first": "Jacopo",
"middle": [],
"last": "Amidei",
"suffix": ""
},
{
"first": "Paul",
"middle": [],
"last": "Piwek",
"suffix": ""
},
{
"first": "Alistair",
"middle": [],
"last": "Willis",
"suffix": ""
}
],
"year": 2013,
"venue": "Proceedings of the 11th International Conference on Natural Language Generation",
"volume": "",
"issue": "",
"pages": "307--317",
"other_ids": {
"DOI": [
"10.18653/v1/W18-6537"
]
},
"num": null,
"urls": [],
"raw_text": "Jacopo Amidei, Paul Piwek, and Alistair Willis. 2018. Evaluation methodologies in automatic question generation 2013-2018. In Proceedings of the 11th International Conference on Natural Language Gen- eration, pages 307-317, Tilburg University, The Netherlands. Association for Computational Lin- guistics.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Inter-Coder Agreement for Computational Linguistics",
"authors": [
{
"first": "Ron",
"middle": [],
"last": "Artstein",
"suffix": ""
},
{
"first": "Massimo",
"middle": [],
"last": "Poesio",
"suffix": ""
}
],
"year": 2008,
"venue": "Computational Linguistics",
"volume": "34",
"issue": "4",
"pages": "555--596",
"other_ids": {
"DOI": [
"10.1162/coli.07-034-R2"
]
},
"num": null,
"urls": [],
"raw_text": "Ron Artstein and Massimo Poesio. 2008. Inter-Coder Agreement for Computational Linguistics. Compu- tational Linguistics, 34(4):555-596.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Disentangling the properties of human evaluation methods: A classification system to support comparability, meta-evaluation and reproducibility testing",
"authors": [
{
"first": "Anya",
"middle": [],
"last": "Belz",
"suffix": ""
},
{
"first": "Simon",
"middle": [],
"last": "Mille",
"suffix": ""
},
{
"first": "David",
"middle": [],
"last": "Howcroft",
"suffix": ""
}
],
"year": 2020,
"venue": "Proceedings of the 13th International Conference on Natural Language Generation",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Anya Belz, Simon Mille, and David Howcroft. 2020. Disentangling the properties of human evaluation methods: A classification system to support compa- rability, meta-evaluation and reproducibility testing. In Proceedings of the 13th International Conference on Natural Language Generation.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Data Statements for Natural Language Processing: Toward Mitigating System Bias and Enabling Better Science. Transactions of the Association for Computational Linguistics",
"authors": [
{
"first": "Emily",
"middle": [
"M"
],
"last": "Bender",
"suffix": ""
},
{
"first": "Batya",
"middle": [],
"last": "Friedman",
"suffix": ""
}
],
"year": 2018,
"venue": "",
"volume": "6",
"issue": "",
"pages": "587--604",
"other_ids": {
"DOI": [
"10.1162/tacl_a_00041"
]
},
"num": null,
"urls": [],
"raw_text": "Emily M. Bender and Batya Friedman. 2018. Data Statements for Natural Language Processing: To- ward Mitigating System Bias and Enabling Better Science. Transactions of the Association for Com- putational Linguistics, 6:587-604.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Evaluation of text generation: A survey",
"authors": [
{
"first": "Asli",
"middle": [],
"last": "Celikyilmaz",
"suffix": ""
},
{
"first": "Elizabeth",
"middle": [],
"last": "Clark",
"suffix": ""
},
{
"first": "Jianfeng",
"middle": [],
"last": "Gao",
"suffix": ""
}
],
"year": 2020,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Asli Celikyilmaz, Elizabeth Clark, and Jianfeng Gao. 2020. Evaluation of text generation: A survey.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Readability: An Appraisal of Research and Application. The Ohio State University",
"authors": [
{
"first": "Jeanne",
"middle": [
"S"
],
"last": "Chall",
"suffix": ""
}
],
"year": 1958,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jeanne S. Chall. 1958. Readability: An Appraisal of Research and Application. The Ohio State Univer- sity, Columbus, OH, USA.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Using the Crowd for Readability Prediction",
"authors": [
{
"first": "Orph\u00e9e",
"middle": [],
"last": "De Clercq",
"suffix": ""
},
{
"first": "V\u00e9ronique",
"middle": [],
"last": "Hoste",
"suffix": ""
},
{
"first": "Bart",
"middle": [],
"last": "Desmet",
"suffix": ""
},
{
"first": "Philip",
"middle": [],
"last": "Van Oosten",
"suffix": ""
},
{
"first": "Martine",
"middle": [],
"last": "De Cock",
"suffix": ""
},
{
"first": "Lieve",
"middle": [],
"last": "Macken",
"suffix": ""
}
],
"year": 2014,
"venue": "Natural Language Engineering",
"volume": "20",
"issue": "03",
"pages": "293--325",
"other_ids": {
"DOI": [
"10.1017/S1351324912000344"
]
},
"num": null,
"urls": [],
"raw_text": "Orph\u00e9e De Clercq, V\u00e9ronique Hoste, Bart Desmet, Philip Van Oosten, Martine De Cock, and Lieve Macken. 2014. Using the Crowd for Readabil- ity Prediction. Natural Language Engineering, 20(03):293-325.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Survey of the state of the art in natural language generation: Core tasks, applications and evaluation",
"authors": [
{
"first": "Albert",
"middle": [],
"last": "Gatt",
"suffix": ""
},
{
"first": "Emiel",
"middle": [],
"last": "Krahmer",
"suffix": ""
}
],
"year": 2018,
"venue": "Journal of Artificial Intelligence Research",
"volume": "61",
"issue": "",
"pages": "65--170",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Albert Gatt and Emiel Krahmer. 2018. Survey of the state of the art in natural language generation: Core tasks, applications and evaluation. Journal of Artifi- cial Intelligence Research, 61:65-170.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Datasheets for Datasets",
"authors": [
{
"first": "Timnit",
"middle": [],
"last": "Gebru",
"suffix": ""
},
{
"first": "Jamie",
"middle": [],
"last": "Morgenstern",
"suffix": ""
},
{
"first": "Briana",
"middle": [],
"last": "Vecchione",
"suffix": ""
},
{
"first": "Jennifer",
"middle": [
"Wortman"
],
"last": "Vaughan",
"suffix": ""
},
{
"first": "Hanna",
"middle": [],
"last": "Wallach",
"suffix": ""
},
{
"first": "Hal",
"middle": [
"Daum\u00e9"
],
"last": "Iii",
"suffix": ""
},
{
"first": "Kate",
"middle": [],
"last": "Crawford",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the 5th Workshop on Fairness, Accountability, and Transparency in Machine Learning",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Timnit Gebru, Jamie Morgenstern, Briana Vec- chione, Jennifer Wortman Vaughan, Hanna Wal- lach, Hal Daum\u00e9 Iii, and Kate Crawford. 2018. Datasheets for Datasets. In Proceedings of the 5th Workshop on Fairness, Accountability, and Trans- parency in Machine Learning, Stockholm, Sweden.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "A snapshot of NLG evaluation practices 2005-2014",
"authors": [
{
"first": "Dimitra",
"middle": [],
"last": "Gkatzia",
"suffix": ""
},
{
"first": "Saad",
"middle": [],
"last": "Mahamood",
"suffix": ""
}
],
"year": 2015,
"venue": "Proceedings of the 15th European Workshop on Natural Language Generation (ENLG)",
"volume": "",
"issue": "",
"pages": "57--60",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Dimitra Gkatzia and Saad Mahamood. 2015. A snap- shot of NLG evaluation practices 2005-2014. In Pro- ceedings of the 15th European Workshop on Natural Language Generation (ENLG), pages 57-60.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Best practices for the human evaluation of automatically generated text",
"authors": [
{
"first": "Chris",
"middle": [],
"last": "van der Lee",
"suffix": ""
},
{
"first": "Albert",
"middle": [],
"last": "Gatt",
"suffix": ""
},
{
"first": "Emiel",
"middle": [],
"last": "van Miltenburg",
"suffix": ""
},
{
"first": "Sander",
"middle": [],
"last": "Wubben",
"suffix": ""
},
{
"first": "Emiel",
"middle": [],
"last": "Krahmer",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the 12th International Conference on Natural Language Generation",
"volume": "",
"issue": "",
"pages": "355--368",
"other_ids": {
"DOI": [
"10.18653/v1/W19-8643"
]
},
"num": null,
"urls": [],
"raw_text": "Chris van der Lee, Albert Gatt, Emiel van Miltenburg, Sander Wubben, and Emiel Krahmer. 2019. Best practices for the human evaluation of automatically generated text. In Proceedings of the 12th Interna- tional Conference on Natural Language Generation, pages 355-368, Tokyo, Japan. Association for Com- putational Linguistics.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Model Cards for Model Reporting",
"authors": [
{
"first": "Margaret",
"middle": [],
"last": "Mitchell",
"suffix": ""
},
{
"first": "Simone",
"middle": [],
"last": "Wu",
"suffix": ""
},
{
"first": "Andrew",
"middle": [],
"last": "Zaldivar",
"suffix": ""
},
{
"first": "Parker",
"middle": [],
"last": "Barnes",
"suffix": ""
},
{
"first": "Lucy",
"middle": [],
"last": "Vasserman",
"suffix": ""
},
{
"first": "Ben",
"middle": [],
"last": "Hutchinson",
"suffix": ""
},
{
"first": "Elena",
"middle": [],
"last": "Spitzer",
"suffix": ""
},
{
"first": "Inioluwa Deborah",
"middle": [],
"last": "Raji",
"suffix": ""
},
{
"first": "Timnit",
"middle": [],
"last": "Gebru",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the Conference on Fairness, Accountability, and Transparency -FAT* '19",
"volume": "",
"issue": "",
"pages": "220--229",
"other_ids": {
"DOI": [
"10.1145/3287560.3287596"
]
},
"num": null,
"urls": [],
"raw_text": "Margaret Mitchell, Simone Wu, Andrew Zaldivar, Parker Barnes, Lucy Vasserman, Ben Hutchinson, Elena Spitzer, Inioluwa Deborah Raji, and Timnit Gebru. 2019. Model Cards for Model Reporting. In Proceedings of the Conference on Fairness, Ac- countability, and Transparency -FAT* '19, pages 220-229, Atlanta, GA, USA. ACM Press.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Preferred reporting items for systematic reviews and meta-analyses: the prisma statement",
"authors": [
{
"first": "David",
"middle": [],
"last": "Moher",
"suffix": ""
},
{
"first": "Alessandro",
"middle": [],
"last": "Liberati",
"suffix": ""
},
{
"first": "Jennifer",
"middle": [],
"last": "Tetzlaff",
"suffix": ""
},
{
"first": "Douglas G",
"middle": [],
"last": "Altman",
"suffix": ""
}
],
"year": 2009,
"venue": "BMJ",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"DOI": [
"10.1136/bmj.b2535"
]
},
"num": null,
"urls": [],
"raw_text": "David Moher, Alessandro Liberati, Jennifer Tetzlaff, and Douglas G Altman. 2009. Preferred reporting items for systematic reviews and meta-analyses: the prisma statement. BMJ, 339.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Why we need new evaluation metrics for NLG",
"authors": [
{
"first": "Jekaterina",
"middle": [],
"last": "Novikova",
"suffix": ""
},
{
"first": "Ond\u0159ej",
"middle": [],
"last": "Du\u0161ek",
"suffix": ""
},
{
"first": "Amanda",
"middle": [
"Cercas"
],
"last": "Curry",
"suffix": ""
},
{
"first": "Verena",
"middle": [],
"last": "Rieser",
"suffix": ""
}
],
"year": 2017,
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "2241--2252",
"other_ids": {
"DOI": [
"10.18653/v1/D17-1238"
]
},
"num": null,
"urls": [],
"raw_text": "Jekaterina Novikova, Ond\u0159ej Du\u0161ek, Amanda Cer- cas Curry, and Verena Rieser. 2017. Why we need new evaluation metrics for NLG. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 2241-2252, Copenhagen, Denmark. Association for Computa- tional Linguistics.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "RankME: Reliable human ratings for natural language generation",
"authors": [
{
"first": "Jekaterina",
"middle": [],
"last": "Novikova",
"suffix": ""
},
{
"first": "Ond\u0159ej",
"middle": [],
"last": "Du\u0161ek",
"suffix": ""
},
{
"first": "Verena",
"middle": [],
"last": "Rieser",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
"volume": "2",
"issue": "",
"pages": "72--78",
"other_ids": {
"DOI": [
"10.18653/v1/N18-2012"
]
},
"num": null,
"urls": [],
"raw_text": "Jekaterina Novikova, Ond\u0159ej Du\u0161ek, and Verena Rieser. 2018. RankME: Reliable human ratings for natural language generation. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 72-78, New Orleans, Louisiana. Association for Computational Linguistics.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "The Machine Learning Reproducibility Checklist (v2.0",
"authors": [
{
"first": "Joelle",
"middle": [],
"last": "Pineau",
"suffix": ""
}
],
"year": 2020,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Joelle Pineau. 2020. The Machine Learning Repro- ducibility Checklist (v2.0, Apr.7 2020).",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "A Structured Review of the Validity of BLEU",
"authors": [
{
"first": "Ehud",
"middle": [],
"last": "Reiter",
"suffix": ""
}
],
"year": 2018,
"venue": "Computational Linguistics",
"volume": "",
"issue": "",
"pages": "1--8",
"other_ids": {
"DOI": [
"10.1162/coli_a_00322"
]
},
"num": null,
"urls": [],
"raw_text": "Ehud Reiter. 2018. A Structured Review of the Validity of BLEU. Computational Linguistics, pages 1-8.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "An investigation into the validity of some metrics for automatically evaluating natural language generation systems",
"authors": [
{
"first": "Ehud",
"middle": [],
"last": "Reiter",
"suffix": ""
},
{
"first": "Anja",
"middle": [],
"last": "Belz",
"suffix": ""
}
],
"year": 2009,
"venue": "Computational Linguistics",
"volume": "35",
"issue": "4",
"pages": "529--558",
"other_ids": {
"DOI": [
"10.1162/coli.2009.35.4.35405"
]
},
"num": null,
"urls": [],
"raw_text": "Ehud Reiter and Anja Belz. 2009. An investigation into the validity of some metrics for automatically evalu- ating natural language generation systems. Compu- tational Linguistics, 35(4):529-558.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "Beyond accuracy: Behavioral testing of NLP models with CheckList",
"authors": [
{
"first": "Marco Tulio",
"middle": [],
"last": "Ribeiro",
"suffix": ""
},
{
"first": "Tongshuang",
"middle": [],
"last": "Wu",
"suffix": ""
},
{
"first": "Carlos",
"middle": [],
"last": "Guestrin",
"suffix": ""
},
{
"first": "Sameer",
"middle": [],
"last": "Singh",
"suffix": ""
}
],
"year": 2020,
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "4902--4912",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Marco Tulio Ribeiro, Tongshuang Wu, Carlos Guestrin, and Sameer Singh. 2020. Beyond accuracy: Be- havioral testing of NLP models with CheckList. In Proceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 4902- 4912, Online. Association for Computational Lin- guistics.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "Towards best experiment design for evaluating dialogue system output",
"authors": [
{
"first": "Sashank",
"middle": [],
"last": "Santhanam",
"suffix": ""
},
{
"first": "Samira",
"middle": [],
"last": "Shaikh",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the 12th International Conference on Natural Language Generation",
"volume": "",
"issue": "",
"pages": "88--94",
"other_ids": {
"DOI": [
"10.18653/v1/W19-8610"
]
},
"num": null,
"urls": [],
"raw_text": "Sashank Santhanam and Samira Shaikh. 2019. To- wards best experiment design for evaluating dia- logue system output. In Proceedings of the 12th International Conference on Natural Language Gen- eration, pages 88-94, Tokyo, Japan. Association for Computational Linguistics.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"num": null,
"text": "Number of INLG/ENLG papers per year with human evaluation (black) and overall (full bar).",
"type_str": "figure",
"uris": null
},
"FIGREF2": {
"num": null,
"text": "Part of Sankey diagram of evaluation criteria names from NLG papers between 2000 & 2019 (left) mapped to normalised criteria names representing our assessment of what was actually measured (right).",
"type_str": "figure",
"uris": null
},
"FIGREF3": {
"num": null,
"text": "of outputs in their own right; goodness of outputs in their own right (form); goodness of outputs in their own right (both form and content; grammaticality; humanlikeness); readability; [multiple (3): goodness of outputs in their own right (both form and content), grammaticality, naturalness (form)]; [multiple (2): goodness of outputs in their own right (form), grammaticality]; [multiple (3): fluency, grammaticality]; [multiple (2): grammaticality, readability]; [multiple (2): fluency, readability]; [multiple (3): goodness of outputs in their own right (both form and content), grammaticality, naturalness (form)]; [multiple (3): coherence, humanlikeness, quality of outputs]; [multiple (2): goodness of outputs in their own right (both form and content)of outputs in their own right; goodness of outputs in their own right (both form and content); quality of outputs; usefulness for task/information need; readability; [multiple (2): coherence, fluency]; [multiple (2): fluency, readability]; [multiple (2): readability, understandability]; [multiple (3): clarity, correctness of outputs in their own right (form), goodness of outputs in their own right] 10 coherence appropriateness (content); coherence; correctness of outputs in their own right (content); goodness of outputs in their own right (content); goodness of outputs relative to linguistic context in which they are read/heard; wellorderedness; [multiple (2): appropriateness (content), understandability]; [multiple (2): fluency, grammaticality] 8 naturalness clarity; humanlikeness; naturalness; naturalness (both form and content); [multiple (2): naturalness (both form and content), readability]; [multiple (2): grammaticality, naturalness] 6 quality goodness of outputs in their own right; goodness of outputs in their own right (both form and content); goodness of outputs (excluding correctness); quality of outputs; [multiple (3): correctness of outputs relative to input (content), Fluency, Grammaticality] 5 correctness appropriateness (content); correctness of outputs relative to input (content); correctness of outputs relative to input (both form and content); correctness of outputs relative to input (form) 4 usability clarity; quality of outputs; usefulness for task/information need; user satisfaction 4 clarity clarity; correctness of outputs relative to input (content); understandability; [multiple (2): clarity, understandability] 4 informativeness correctness of outputs relative to input (content); goodness of outputs relative to input (content); information content of outputs; text property (informative) 4 accuracy correctness of outputs relative to input; correctness of outputs relative to input (content); goodness of outputs relative to input (content); referent resolvability 4",
"type_str": "figure",
"uris": null
},
"FIGREF5": {
"num": null,
"text": "Taxonomy of normalised quality criteria; greyed out criterion names = not encountered, and/or included for increased completeness of taxonomy.",
"type_str": "figure",
"uris": null
},
"TABREF0": {
"html": null,
"type_str": "table",
"num": null,
"content": "<table/>",
"text": "Number of papers at each selection stage."
},
"TABREF2": {
"html": null,
"type_str": "table",
"num": null,
"content": "<table/>",
"text": "Krippendorff's alpha with Jaccard for closedclass attributes in the 1 st and 2 nd IAA tests. Numbers are not directly comparable (a) between the two tests due to changes in the annotation scheme; (b) within the 2 nd test due to different numbers of annotators."
},
"TABREF4": {
"html": null,
"type_str": "table",
"num": null,
"content": "<table/>",
"text": "Counts of values selected for form of response elicitation."
},
"TABREF6": {
"html": null,
"type_str": "table",
"num": null,
"content": "<table/>",
"text": ""
},
"TABREF7": {
"html": null,
"type_str": "table",
"num": null,
"content": "<table/>",
"text": "Quality criterion names as given by authors mapped to normalised criterion names reflecting our assessment of what the authors actually measured. 'Count' is the number of different mappings found for each original criterion name."
},
"TABREF9": {
"html": null,
"type_str": "table",
"num": null,
"content": "<table/>",
"text": "Quality criteria most frequently combined in a single prompt/question put to evaluators. Show examples of inputs and outputs of your system. Additionally, if you include pre and post-processing steps in your pipeline, clarify whether your input is to the preprocessing, and your output is from the post-processing, step, or what you consider to be the 'core' NLG system. In general, make it easy for readers to determine what form the data is in as it flows through your system."
},
"TABREF10": {
"html": null,
"type_str": "table",
"num": null,
"content": "<table/>",
"text": "Reporting of human evaluations in NLG: Recommended minimum information to include."
},
"TABREF11": {
"html": null,
"type_str": "table",
"num": null,
"content": "<table><tr><td>B System task</td><td/><td/><td/></tr><tr><td>TASK</td><td colspan=\"4\">Before 2010 Since Total</td></tr><tr><td>data-to-text generation</td><td/><td>14</td><td>34</td><td>48</td></tr><tr><td>dialogue turn generation</td><td/><td>7</td><td>14</td><td>21</td></tr><tr><td>summarisation (text-to-text)</td><td/><td>1</td><td>11</td><td>12</td></tr><tr><td colspan=\"2\">referring expression generation</td><td>4</td><td>7</td><td>11</td></tr><tr><td>end-to-end text generation</td><td/><td>1</td><td>8</td><td>9</td></tr><tr><td>question generation</td><td/><td>0</td><td>9</td><td>9</td></tr><tr><td>feature-controlled generation</td><td/><td>4</td><td>5</td><td>9</td></tr><tr><td colspan=\"2\">surface realisation (slr to text)</td><td>3</td><td>5</td><td>8</td></tr><tr><td>deep generation (dlr to text)</td><td/><td>4</td><td>4</td><td>8</td></tr><tr><td colspan=\"3\">paraphrasing / lossless simplification 2</td><td>6</td><td>8</td></tr><tr><td>Other (15 tasks)</td><td/><td>20</td><td>17</td><td>37</td></tr></table>",
"text": "Language frequencies before and after 2010."
},
"TABREF12": {
"html": null,
"type_str": "table",
"num": null,
"content": "<table><tr><td>C System Output</td><td/></tr><tr><td>Output</td><td>Count</td></tr><tr><td>text: multiple sentences</td><td>68</td></tr><tr><td>text: sentence</td><td>40</td></tr><tr><td>text: documents</td><td>20</td></tr><tr><td>text: subsentential units of text</td><td>13</td></tr><tr><td>text: variable-length</td><td>10</td></tr><tr><td>no output (human generation)</td><td>7</td></tr><tr><td>raw/structured data</td><td>3</td></tr><tr><td>text: dialogue</td><td>3</td></tr><tr><td>shallow linguistic representation (slr)</td><td>2</td></tr><tr><td>deep linguistic representation (dlr)</td><td>1</td></tr><tr><td>speech</td><td>1</td></tr><tr><td>text: other (please specify): templates</td><td>1</td></tr></table>",
"text": "Task frequencies before and after 2010."
},
"TABREF13": {
"html": null,
"type_str": "table",
"num": null,
"content": "<table/>",
"text": "Counts for system output attribute."
}
}
}
} |