zihanliu commited on
Commit
f2ad62a
1 Parent(s): 5531843

Upload 7 files

Browse files
evaluation/README.md ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ### Commands for running inference
3
+
4
+ ```console
5
+ python run_generation_vllm.py --model-id nvidia/ChatQA-1.5-8B --eval-dataset DATASET_NAME --data-folder PATH_TO_YOUR_DATA --output-folder PATH_TO_OUTPUT_FOLDER
6
+ ```
7
+
8
+ ### Commands for calculating scores for generated outputs
9
+ ```console
10
+ python get_scores.py
11
+ ```
evaluation/arguments.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import argparse
3
+ import os
4
+
5
def get_args():
    """Parse command-line arguments for ChatQA evaluation.

    Returns:
        argparse.Namespace: model id, dataset file paths (relative to
        --data-folder), and generation options.
    """
    parser = argparse.ArgumentParser(description="ChatQA-HF")

    ## model
    parser.add_argument('--model-id', type=str, default='', help='model id')

    ## dataset path
    parser.add_argument('--data-folder', type=str, default='', help='path to the datafolder of ConvRAG')
    # Fixed: help text previously duplicated the --data-folder description.
    parser.add_argument('--output-folder', type=str, default='', help='path to the folder where generated outputs are written')
    parser.add_argument('--eval-dataset', type=str, default='')
    parser.add_argument('--doc2dial-path', type=str, default='doc2dial/test.json')
    parser.add_argument('--convfinqa-path', type=str, default='convfinqa/dev.json')
    parser.add_argument('--quac-path', type=str, default='quac/test.json')
    parser.add_argument('--qrecc-path', type=str, default='qrecc/test.json')
    parser.add_argument('--doqa-cooking-path', type=str, default='doqa/test_cooking.json')
    parser.add_argument('--doqa-travel-path', type=str, default='doqa/test_travel.json')
    parser.add_argument('--doqa-movies-path', type=str, default='doqa/test_movies.json')
    parser.add_argument('--coqa-path', type=str, default='coqa/dev.json')
    parser.add_argument('--hybridial-path', type=str, default='hybridial/test.json')
    parser.add_argument('--sqa-path', type=str, default='sqa/test.json')
    parser.add_argument('--topiocqa-path', type=str, default='topiocqa/dev.json')
    parser.add_argument('--inscit-path', type=str, default='inscit/dev.json')

    ## others
    parser.add_argument('--out-seq-len', type=int, default=64)
    parser.add_argument('--num-ctx', type=int, default=5)   # top-k retrieved contexts used in the prompt
    parser.add_argument('--max-tokens', type=int, default=64)

    args = parser.parse_args()

    return args
evaluation/dataset.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import json
3
+
4
+
5
def load_data(datapath):
    """Read a JSON file and return the parsed list of samples."""
    print("loading data from %s" % datapath)
    with open(datapath, "r") as fp:
        return json.load(fp)
11
+
12
+
13
def reformat_question(turn_list, dataset_name):
    """Flatten a multi-turn dialogue into a single prompt string.

    Only the latest 7 turns are kept. A dataset-specific answering
    instruction is prepended to one user turn (the first kept user turn for
    long-answer datasets, the last user turn otherwise), and the result ends
    with "Assistant:" so the model continues from there.

    Args:
        turn_list: list of {"role": "user"|"assistant", "content": str}
            dicts; the last kept turn must be a user turn. The input is NOT
            modified (the previous implementation mutated the caller's dicts,
            so reformatting the same sample twice stacked the instruction).
        dataset_name: one of the supported benchmark names below.

    Returns:
        str: "User: ...\\n\\nAssistant: ...\\n\\n...Assistant:" prompt.

    Raises:
        Exception: if dataset_name is not a recognized benchmark.
    """
    ## only take the latest 7 turns; shallow-copy each turn dict so the
    ## caller's data is left untouched
    turn_list = [dict(turn) for turn in turn_list[-7:]]
    assert turn_list[-1]['role'] == 'user'

    long_answer_dataset_list = ["doc2dial", "quac", "qrecc", "inscit", "doqa_movies", "doqa_travel", "doqa_cooking", "hybridial", "convfinqa"]
    long_and_short_dataset_list = ["topiocqa"]
    entity_dataset_list = ["sqa"]
    short_dataset_list = ["coqa"]

    if dataset_name in long_answer_dataset_list:
        for item in turn_list:
            if item['role'] == 'user':
                ## only needs to add it on the first user turn
                item['content'] = 'Please give a full and complete answer for the question. ' + item['content']
                break

    elif dataset_name in long_and_short_dataset_list:
        turn_list[-1]['content'] = "Answer the following question with a short span, or a full and complete answer. " + turn_list[-1]['content']

    elif dataset_name in entity_dataset_list:
        turn_list[-1]['content'] = "Answer the following question with one or a list of items. " + turn_list[-1]['content']

    elif dataset_name in short_dataset_list:
        turn_list[-1]['content'] = "Answer the following question with a short span. The answer needs to be just in a few words. " + turn_list[-1]['content']

    else:
        raise Exception("please input a correct dataset name!")

    question = ""
    for item in turn_list:
        if item["role"] == "user":
            question += "User: " + item["content"] + "\n\n"
        else:
            assert item["role"] == "assistant"
            question += "Assistant: " + item["content"] + "\n\n"

    question += "Assistant:"

    return question
54
+
55
+
56
def get_inputs(data_list, dataset_name, tokenizer, num_ctx, max_output_len, max_seq_length=4096):
    """Build one prompt per sample: system message + retrieved contexts + dialogue.

    Contexts are truncated token-wise so that system + context + question plus
    the generation budget fit within max_seq_length.

    Args:
        data_list: samples, each with 'messages' (dialogue turns) and 'ctxs'
            (retrieved passages with 'title' and 'text' keys).
        dataset_name: benchmark name, forwarded to reformat_question.
        tokenizer: assumed HF-style tokenizer with encode()/decode(
            skip_special_tokens=...) — TODO confirm against caller.
        num_ctx: number of top retrieved contexts to include.
        max_output_len: number of tokens reserved for the generation.
        max_seq_length: total token budget for prompt + generation.

    Returns:
        list[str]: formatted prompt strings, one per sample.
    """
    system = "System: This is a chat between a user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions based on the context. The assistant should also indicate when the answer cannot be found in the context."

    prompt_list = []
    for item in data_list:
        turn_list = item['messages']
        question_formatted = reformat_question(turn_list, dataset_name)

        ctx_list = ["title: " + ctx["title"] + ", source: " + ctx["text"] for ctx in item['ctxs'][:num_ctx]]
        context = "\n\n".join(ctx_list)

        context_tokens = tokenizer.encode(context)
        question_tokens = tokenizer.encode(question_formatted)
        system_tokens = tokenizer.encode(system)

        if len(context_tokens) + len(question_tokens) + len(system_tokens) + max_output_len >= max_seq_length:
            # Clamp the keep-count at 0: when question + system alone exceed
            # the budget, the previous negative slice index silently kept
            # tokens from the wrong end of the context instead of dropping it.
            keep = max(0, max_seq_length - max_output_len - len(question_tokens) - len(system_tokens))
            context_tokens = context_tokens[:keep]
            context = tokenizer.decode(context_tokens, skip_special_tokens=True)

        model_input = system + "\n\n" + context + "\n\n" + question_formatted

        prompt_list.append(model_input)

    return prompt_list
81
+
evaluation/evaluation_utils.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+
2
+
3
+ ## an index list of the samples whose correct context is found among the top-5 retrieved contexts
4
+ quac_correct_retrieved_instance_idx_list = [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 124, 125, 126, 127, 128, 129, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 152, 153, 156, 157, 158, 159, 161, 162, 163, 164, 165, 166, 167, 169, 170, 171, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 188, 189, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 216, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 243, 245, 246, 248, 249, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 284, 285, 287, 289, 290, 291, 292, 293, 294, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 335, 336, 337, 338, 339, 340, 341, 342, 344, 345, 346, 347, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 362, 363, 364, 365, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 415, 417, 419, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 437, 438, 440, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 466, 
467, 468, 469, 470, 471, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 484, 485, 486, 488, 489, 491, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 509, 510, 511, 512, 514, 515, 518, 519, 520, 521, 522, 523, 524, 527, 528, 529, 530, 531, 532, 533, 534, 535, 538, 539, 540, 541, 542, 543, 544, 547, 548, 549, 550, 551, 552, 554, 555, 557, 558, 560, 561, 562, 563, 564, 565, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597, 598, 599, 600, 601, 602, 603, 604, 605, 608, 609, 610, 611, 612, 613, 614, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 637, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 652, 653, 655, 656, 658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670, 671, 672, 673, 674, 675, 676, 677, 678, 679, 680, 681, 682, 683, 684, 685, 686, 687, 688, 689, 692, 693, 695, 699, 700, 701, 703, 704, 705, 707, 708, 709, 711, 712, 715, 716, 717, 718, 719, 720, 721, 723, 724, 726, 728, 732, 733, 734, 738, 739, 740, 741, 742, 743, 744, 748, 749, 751, 752, 753, 754, 755, 756, 757, 758, 759, 760, 761, 762, 763, 764, 765, 766, 767, 768, 769, 770, 773, 774, 775, 776, 777, 778, 781, 782, 783, 784, 785, 786, 787, 788, 789, 790, 791, 792, 793, 794, 795, 796, 797, 798, 799, 800, 801, 802, 804, 805, 806, 807, 809, 811, 812, 813, 814, 815, 816, 817, 818, 819, 820, 822, 823, 824, 825, 827, 828, 829, 830, 831, 832, 833, 834, 835, 836, 837, 838, 839, 840, 841, 842, 843, 844, 845, 846, 848, 849, 850, 852, 853, 854, 855, 856, 857, 858, 859, 860, 861, 862, 863, 865, 866, 867, 868, 869, 870, 871, 872, 873, 874, 876, 877, 878, 879, 880, 883, 884, 885, 886, 889, 890, 891, 892, 893, 894, 895, 896, 897, 898, 902, 903, 904, 905, 906, 907, 908, 909, 910, 911, 912, 913, 914, 915, 916, 917, 919, 920, 921, 922, 923, 924, 925, 926, 927, 928, 929, 930, 931, 932, 933, 934, 935, 936, 938, 944, 945, 
946, 947, 948, 949, 950, 951, 952, 953, 954, 955, 956, 959, 960, 961, 962, 963, 964, 965, 966, 967, 968, 969, 972, 974, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, 995, 998, 999, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1022, 1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1067, 1068, 1069, 1070, 1071, 1073, 1074, 1075, 1076, 1077, 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, 1093, 1094, 1095, 1096, 1098, 1099, 1100, 1102, 1103, 1105, 1106, 1107, 1109, 1110, 1113, 1114, 1118, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1130, 1131, 1133, 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, 1151, 1152, 1155, 1156, 1157, 1158, 1159, 1160, 1161, 1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201, 1203, 1204, 1205, 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1221, 1223, 1225, 1226, 1227, 1228, 1229, 1230, 1232, 1233, 1234, 1235, 1237, 1239, 1240, 1241, 1242, 1243, 1244, 1245, 1246, 1247, 1248, 1249, 1250, 1251, 1252, 1254, 1255, 1256, 1257, 1258, 1259, 1260, 1261, 1262, 1263, 1264, 1265, 1267, 1270, 1271, 1272, 1273, 1274, 1275, 1276, 1277, 1278, 1279, 1280, 1283, 1284, 1285, 1287, 1288, 1289, 1291, 1292, 1293, 1294, 1295, 1296, 1297, 1298, 1299, 1300, 1301, 1302, 1303, 1309, 1310, 1311, 1312, 1313, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1321, 1322, 1323, 1324, 1325, 1326, 1330, 1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1342, 1343, 1345, 1346, 1347, 1348, 1349, 1350, 
1351, 1352, 1353, 1354, 1356, 1357, 1358, 1359, 1360, 1361, 1362, 1363, 1364, 1365, 1366, 1367, 1368, 1369, 1370, 1371, 1372, 1373, 1374, 1375, 1376, 1377, 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1392, 1393, 1394, 1395, 1396, 1397, 1398, 1399, 1402, 1403, 1404, 1405, 1407, 1408, 1410, 1411, 1412, 1413, 1414, 1415, 1416, 1417, 1418, 1419, 1420, 1421, 1422, 1423, 1424, 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, 1433, 1434, 1435, 1437, 1438, 1439, 1440, 1441, 1442, 1443, 1444, 1445, 1447, 1448, 1449, 1451, 1452, 1453, 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, 1462, 1463, 1467, 1468, 1469, 1471, 1472, 1474, 1475, 1476, 1477, 1478, 1479, 1481, 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, 1490, 1491, 1492, 1495, 1498, 1504, 1505, 1507, 1508, 1509, 1512, 1513, 1516, 1517, 1518, 1519, 1520, 1521, 1522, 1523, 1524, 1525, 1527, 1528, 1529, 1530, 1531, 1532, 1533, 1536, 1537, 1541, 1542, 1543, 1544, 1545, 1546, 1547, 1549, 1550, 1551, 1552, 1553, 1554, 1555, 1556, 1557, 1559, 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, 1568, 1569, 1570, 1571, 1572, 1573, 1574, 1575, 1576, 1577, 1578, 1579, 1580, 1581, 1583, 1584, 1585, 1586, 1587, 1588, 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1597, 1598, 1599, 1600, 1601, 1602, 1603, 1604, 1608, 1609, 1611, 1612, 1613, 1614, 1615, 1616, 1618, 1619, 1620, 1621, 1622, 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, 1631, 1632, 1633, 1634, 1635, 1639, 1640, 1641, 1642, 1643, 1645, 1646, 1647, 1648, 1649, 1650, 1651, 1652, 1653, 1655, 1656, 1657, 1661, 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, 1670, 1671, 1672, 1673, 1674, 1678, 1679, 1681, 1682, 1683, 1686, 1687, 1688, 1689, 1690, 1691, 1694, 1695, 1696, 1697, 1699, 1700, 1701, 1702, 1703, 1704, 1706, 1707, 1708, 1709, 1710, 1711, 1712, 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, 1723, 1724, 1725, 1726, 1729, 1730, 1731, 1732, 1733, 1734, 1735, 1736, 1737, 1738, 1739, 1740, 1741, 1742, 1743, 1744, 1747, 1748, 1749, 1750, 1751, 
1752, 1753, 1754, 1755, 1756, 1758, 1759, 1760, 1762, 1763, 1764, 1765, 1766, 1767, 1768, 1769, 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, 1779, 1780, 1782, 1783, 1785, 1786, 1787, 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798, 1800, 1801, 1802, 1803, 1804, 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1816, 1819, 1820, 1821, 1822, 1823, 1824, 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1843, 1844, 1845, 1847, 1848, 1849, 1850, 1851, 1852, 1853, 1854, 1855, 1856, 1857, 1858, 1860, 1863, 1864, 1867, 1868, 1869, 1870, 1871, 1872, 1873, 1874, 1875, 1876, 1878, 1879, 1880, 1881, 1882, 1883, 1884, 1888, 1889, 1890, 1891, 1892, 1893, 1894, 1895, 1896, 1897, 1898, 1899, 1900, 1903, 1904, 1906, 1907, 1908, 1910, 1911, 1912, 1914, 1915, 1917, 1920, 1922, 1923, 1924, 1925, 1926, 1927, 1928, 1929, 1930, 1931, 1932, 1933, 1934, 1935, 1936, 1937, 1938, 1939, 1940, 1941, 1942, 1944, 1946, 1947, 1948, 1950, 1951, 1952, 1955, 1956, 1957, 1958, 1959, 1965, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1975, 1976, 1980, 1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2014, 2015, 2017, 2018, 2019, 2020, 2021, 2023, 2025, 2026, 2027, 2028, 2029, 2033, 2036, 2037, 2038, 2039, 2040, 2041, 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2049, 2050, 2051, 2052, 2053, 2054, 2055, 2056, 2057, 2058, 2060, 2061, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, 2070, 2071, 2073, 2074, 2075, 2078, 2079, 2083, 2084, 2086, 2087, 2088, 2089, 2090, 2091, 2092, 2093, 2094, 2095, 2096, 2097, 2098, 2099, 2101, 2102, 2103, 2104, 2105, 2106, 2107, 2109, 2110, 2112, 2113, 2114, 2115, 2116, 2117, 2120, 2123, 2124, 2125, 2126, 2127, 2128, 2129, 2132, 2134, 2138, 2139, 2140, 2142, 2143, 2144, 2145, 2146, 2147, 2148, 2149, 2150, 2151, 2152, 2153, 2154, 2155, 2156, 2157, 2159, 2160, 2161, 2162, 2163, 2164, 
2165, 2166, 2167, 2168, 2169, 2170, 2171, 2172, 2174, 2177, 2179, 2180, 2181, 2182, 2183, 2184, 2185, 2186, 2187, 2188, 2189, 2191, 2192, 2193, 2196, 2198, 2199, 2201, 2202, 2205, 2206, 2207, 2209, 2212, 2213, 2218, 2220, 2221, 2222, 2223, 2224, 2225, 2226, 2227, 2228, 2229, 2230, 2231, 2232, 2233, 2234, 2235, 2236, 2237, 2238, 2239, 2240, 2242, 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250, 2255, 2256, 2257, 2258, 2259, 2260, 2261, 2262, 2264, 2265, 2266, 2267, 2268, 2269, 2270, 2271, 2272, 2273, 2274, 2275, 2276, 2277, 2278, 2279, 2281, 2282, 2283, 2284, 2285, 2287, 2288, 2289, 2290, 2291, 2292, 2293, 2294, 2295, 2299, 2301, 2302, 2303, 2304, 2305, 2306, 2308, 2309, 2311, 2312, 2313, 2314, 2315, 2317, 2318, 2319, 2320, 2321, 2322, 2323, 2324, 2325, 2326, 2327, 2330, 2331, 2332, 2333, 2334, 2335, 2336, 2337, 2338, 2339, 2340, 2342, 2343, 2344, 2346, 2348, 2349, 2350, 2351, 2352, 2353, 2354, 2355, 2356, 2357, 2358, 2361, 2362, 2364, 2365, 2366, 2367, 2368, 2369, 2370, 2371, 2379, 2384, 2385, 2386, 2387, 2388, 2389, 2391, 2392, 2393, 2394, 2395, 2396, 2397, 2398, 2399, 2400, 2401, 2402, 2403, 2404, 2405, 2406, 2407, 2408, 2409, 2410, 2411, 2412, 2413, 2414, 2419, 2420, 2421, 2424, 2425, 2426, 2427, 2428, 2429, 2430, 2431, 2432, 2434, 2435, 2436, 2437, 2438, 2439, 2440, 2441, 2442, 2443, 2444, 2445, 2446, 2447, 2448, 2449, 2450, 2451, 2452, 2453, 2454, 2455, 2456, 2457, 2458, 2459, 2460, 2462, 2463, 2464, 2466, 2467, 2469, 2470, 2472, 2473, 2475, 2476, 2477, 2478, 2481, 2482, 2483, 2484, 2485, 2486, 2487, 2488, 2490, 2491, 2492, 2493, 2494, 2495, 2496, 2497, 2498, 2499, 2500, 2501, 2502, 2503, 2504, 2505, 2506, 2507, 2508, 2509, 2511, 2512, 2514, 2515, 2516, 2517, 2518, 2519, 2520, 2521, 2522, 2523, 2524, 2525, 2526, 2527, 2528, 2529, 2530, 2531, 2532, 2533, 2534, 2535, 2536, 2537, 2541, 2542, 2543, 2544, 2545, 2546, 2548, 2551, 2552, 2553, 2555, 2556, 2557, 2558, 2559, 2560, 2561, 2562, 2563, 2564, 2565, 2566, 2572, 2573, 2574, 2575, 2576, 2577, 2578, 2579, 2580, 
2581, 2582, 2584, 2585, 2586, 2587, 2588, 2589, 2590, 2591, 2593, 2594, 2595, 2596, 2599, 2600, 2601, 2606, 2607, 2608, 2609, 2610, 2611, 2612, 2613, 2615, 2616, 2618, 2619, 2620, 2623, 2624, 2625, 2626, 2627, 2628, 2629, 2630, 2631, 2633, 2634, 2635, 2636, 2637, 2638, 2639, 2640, 2641, 2642, 2643, 2644, 2645, 2646, 2647, 2648, 2649, 2650, 2651, 2652, 2653, 2655, 2656, 2657, 2658, 2659, 2660, 2661, 2662, 2663, 2664, 2666, 2667, 2668, 2669, 2670, 2671, 2673, 2674, 2675, 2676, 2677, 2678, 2679, 2680, 2681, 2682, 2684, 2687, 2688, 2691, 2692, 2693, 2694, 2695, 2697, 2698, 2699, 2700, 2701, 2702, 2703, 2704, 2705, 2706, 2707, 2708, 2709, 2710, 2711, 2712, 2713, 2714, 2715, 2716, 2717, 2719, 2721, 2722, 2723, 2724, 2725, 2726, 2727, 2728, 2729, 2730, 2731, 2732, 2737, 2738, 2739, 2740, 2741, 2742, 2744, 2745, 2746, 2747, 2748, 2749, 2750, 2751, 2752, 2755, 2756, 2757, 2758, 2759, 2760, 2761, 2762, 2763, 2764, 2768, 2769, 2771, 2773, 2774, 2775, 2776, 2777, 2778, 2783, 2784, 2785, 2786, 2787, 2788, 2789, 2790, 2791, 2792, 2793, 2794, 2795, 2796, 2797, 2799, 2800, 2801, 2802, 2803, 2804, 2805, 2806, 2807, 2808, 2809, 2810, 2811, 2812, 2813, 2814, 2815, 2816, 2817, 2818, 2819, 2822, 2823, 2824, 2825, 2826, 2827, 2829, 2830, 2831, 2832, 2834, 2835, 2836, 2837, 2838, 2839, 2841, 2842, 2843, 2845, 2846, 2848, 2849, 2850, 2852, 2853, 2854, 2855, 2856, 2857, 2858, 2859, 2860, 2861, 2862, 2863, 2864, 2865, 2866, 2867, 2868, 2869, 2870, 2871, 2872, 2873, 2875, 2876, 2877, 2878, 2879, 2880, 2881, 2882, 2883, 2884, 2885, 2886, 2887, 2888, 2889, 2890, 2891, 2893, 2894, 2895, 2896, 2897, 2899, 2900, 2901, 2902, 2903, 2904, 2905, 2906, 2907, 2908, 2909, 2910, 2911, 2913, 2914, 2915, 2916, 2917, 2918, 2919, 2920, 2922, 2923, 2924, 2925, 2926, 2928, 2929, 2930, 2931, 2932, 2933, 2934, 2935, 2936, 2937, 2940, 2943, 2944, 2945, 2947, 2948, 2949, 2950, 2951, 2952, 2953, 2954, 2955, 2957, 2959, 2961, 2962, 2963, 2964, 2965, 2966, 2967, 2969, 2970, 2971, 2972, 2973, 2974, 2975, 2976, 2977, 
2978, 2979, 2980, 2981, 2982, 2983, 2984, 2985, 2986, 2987, 2988, 2989, 2990, 2991, 2992, 2993, 2994, 2995, 2998, 2999, 3000, 3001, 3003, 3004, 3005, 3006, 3007, 3008, 3009, 3010, 3011, 3012, 3015, 3016, 3017, 3018, 3019, 3020, 3021, 3022, 3023, 3024, 3025, 3026, 3027, 3028, 3029, 3030, 3031, 3032, 3033, 3037, 3040, 3041, 3043, 3044, 3045, 3046, 3047, 3049, 3050, 3051, 3052, 3053, 3054, 3055, 3056, 3057, 3058, 3059, 3060, 3061, 3062, 3063, 3064, 3065, 3066, 3067, 3068, 3069, 3070, 3071, 3072, 3073, 3074, 3075, 3076, 3077, 3078, 3079, 3080, 3082, 3083, 3084, 3086, 3087, 3089, 3090, 3091, 3093, 3094, 3095, 3096, 3097, 3098, 3099, 3101, 3102, 3103, 3104, 3105, 3106, 3107, 3110, 3111, 3115, 3116, 3118, 3119, 3120, 3121, 3122, 3123, 3124, 3125, 3126, 3127, 3128, 3129, 3130, 3131, 3132, 3133, 3134, 3135, 3136, 3137, 3138, 3139, 3140, 3141, 3142, 3144, 3145, 3146, 3147, 3148, 3149, 3150, 3151, 3152, 3153, 3154, 3155, 3156, 3157, 3158, 3159, 3160, 3161, 3162, 3163, 3164, 3165, 3166, 3167, 3168, 3169, 3171, 3172, 3175, 3176, 3177, 3178, 3179, 3180, 3181, 3182, 3183, 3185, 3186, 3187, 3189, 3190, 3191, 3192, 3193, 3194, 3195, 3196, 3197, 3198, 3199, 3200, 3201, 3203, 3204, 3205, 3206, 3207, 3208, 3209, 3210, 3211, 3212, 3214, 3215, 3216, 3217, 3218, 3220, 3221, 3222, 3223, 3225, 3226, 3227, 3228, 3229, 3230, 3231, 3232, 3233, 3234, 3235, 3236, 3237, 3238, 3239, 3240, 3241, 3242, 3243, 3244, 3245, 3246, 3247, 3248, 3249, 3254, 3255, 3256, 3257, 3258, 3259, 3260, 3261, 3262, 3263, 3264, 3265, 3266, 3267, 3268, 3269, 3270, 3271, 3273, 3274, 3275, 3276, 3277, 3278, 3279, 3280, 3281, 3282, 3283, 3285, 3286, 3287, 3288, 3289, 3290, 3291, 3292, 3293, 3294, 3295, 3296, 3297, 3298, 3299, 3300, 3301, 3302, 3303, 3304, 3305, 3306, 3307, 3308, 3309, 3310, 3311, 3313, 3314, 3316, 3317, 3318, 3319, 3320, 3321, 3323, 3325, 3326, 3327, 3328, 3329, 3330, 3331, 3332, 3333, 3334, 3335, 3336, 3337, 3338, 3339, 3340, 3341, 3342, 3343, 3344, 3345, 3346, 3351, 3353, 3354, 3355, 3356, 3359, 3363, 
3365, 3366, 3367, 3368, 3369, 3370, 3371, 3372, 3373, 3374, 3375, 3377, 3379, 3380, 3381, 3382, 3383, 3384, 3387, 3388, 3389, 3390, 3391, 3392, 3393, 3394, 3395, 3401, 3402, 3403, 3406, 3407, 3408, 3409, 3410, 3412, 3413, 3414, 3415, 3416, 3417, 3418, 3419, 3420, 3422, 3423, 3424, 3425, 3426, 3427, 3428, 3430, 3431, 3432, 3433, 3434, 3436, 3437, 3438, 3439, 3440, 3441, 3442, 3443, 3444, 3445, 3447, 3448, 3449, 3450, 3451, 3452, 3453, 3454, 3456, 3457, 3459, 3460, 3461, 3462, 3463, 3464, 3465, 3467, 3469, 3470, 3471, 3472, 3473, 3474, 3475, 3476, 3477, 3478, 3479, 3480, 3481, 3482, 3483, 3484, 3485, 3486, 3487, 3488, 3489, 3490, 3491, 3492, 3493, 3494, 3498, 3499, 3503, 3504, 3505, 3508, 3509, 3510, 3511, 3512, 3513, 3514, 3515, 3516, 3517, 3518, 3519, 3520, 3521, 3522, 3523, 3524, 3525, 3526, 3527, 3528, 3529, 3530, 3531, 3532, 3533, 3534, 3535, 3536, 3537, 3538, 3539, 3540, 3541, 3542, 3543, 3544, 3545, 3548, 3549, 3550, 3551, 3552, 3553, 3554, 3555, 3556, 3557, 3558, 3559, 3560, 3561, 3562, 3563, 3564, 3565, 3566, 3567, 3568, 3569, 3570, 3571, 3572, 3573, 3574, 3575, 3576, 3577, 3578, 3579, 3582, 3583, 3584, 3586, 3587, 3588, 3589, 3590, 3591, 3592, 3593, 3594, 3595, 3596, 3597, 3598, 3600, 3601, 3602, 3604, 3609, 3610, 3611, 3612, 3614, 3615, 3616, 3617, 3618, 3619, 3622, 3623, 3625, 3626, 3628, 3630, 3631, 3632, 3633, 3634, 3635, 3636, 3637, 3638, 3639, 3640, 3641, 3643, 3644, 3645, 3646, 3647, 3648, 3649, 3650, 3651, 3652, 3653, 3654, 3655, 3656, 3657, 3659, 3660, 3661, 3662, 3663, 3664, 3665, 3666, 3670, 3671, 3672, 3673, 3674, 3675, 3676, 3677, 3678, 3679, 3680, 3682, 3683, 3684, 3685, 3686, 3687, 3688, 3689, 3690, 3691, 3692, 3693, 3694, 3695, 3696, 3697, 3698, 3699, 3700, 3704, 3705, 3706, 3708, 3709, 3710, 3711, 3712, 3713, 3714, 3716, 3718, 3719, 3720, 3722, 3724, 3726, 3727, 3730, 3731, 3732, 3734, 3735, 3736, 3737, 3738, 3739, 3740, 3741, 3742, 3744, 3745, 3746, 3747, 3748, 3749, 3753, 3754, 3755, 3756, 3757, 3758, 3759, 3760, 3762, 3763, 3764, 3765, 
3766, 3770, 3771, 3772, 3773, 3774, 3775, 3776, 3777, 3778, 3779, 3780, 3781, 3782, 3783, 3784, 3785, 3786, 3787, 3788, 3790, 3791, 3792, 3793, 3794, 3795, 3796, 3797, 3798, 3799, 3800, 3802, 3803, 3804, 3805, 3806, 3807, 3808, 3809, 3810, 3811, 3812, 3813, 3814, 3815, 3816, 3817, 3818, 3819, 3820, 3821, 3822, 3826, 3828, 3829, 3830, 3831, 3833, 3834, 3835, 3836, 3837, 3838, 3839, 3840, 3841, 3842, 3843, 3845, 3846, 3850, 3851, 3852, 3853, 3854, 3856, 3857, 3858, 3859, 3860, 3862, 3863, 3864, 3865, 3866, 3867, 3868, 3869, 3871, 3873, 3874, 3876, 3877, 3878, 3879, 3881, 3882, 3886, 3887, 3888, 3889, 3890, 3891, 3892, 3893, 3894, 3895, 3896, 3897, 3898, 3899, 3900, 3901, 3902, 3903, 3904, 3905, 3906, 3907, 3908, 3911, 3912, 3913, 3915, 3916, 3917, 3918, 3919, 3921, 3922, 3923, 3924, 3925, 3926, 3927, 3929, 3930, 3931, 3932, 3933, 3934, 3935, 3938, 3939, 3940, 3941, 3943, 3944, 3945, 3946, 3947, 3948, 3949, 3950, 3951, 3952, 3953, 3954, 3956, 3957, 3958, 3959, 3960, 3963, 3965, 3969, 3970, 3971, 3972, 3973, 3974, 3975, 3976, 3978, 3981, 3982, 3983, 3984, 3985, 3986, 3987, 3988, 3989, 3990, 3991, 3992, 3995, 3996, 4000, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008, 4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4017, 4018, 4019, 4020, 4021, 4022, 4023, 4024, 4025, 4026, 4027, 4028, 4029, 4030, 4040, 4043, 4045, 4047, 4049, 4050, 4051, 4052, 4053, 4054, 4055, 4056, 4057, 4059, 4060, 4064, 4065, 4066, 4067, 4073, 4074, 4075, 4076, 4077, 4078, 4079, 4080, 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, 4094, 4095, 4096, 4097, 4098, 4099, 4100, 4101, 4102, 4103, 4104, 4105, 4106, 4107, 4108, 4109, 4110, 4111, 4112, 4113, 4116, 4117, 4120, 4122, 4123, 4124, 4125, 4126, 4127, 4128, 4129, 4130, 4131, 4132, 4133, 4134, 4135, 4136, 4137, 4138, 4139, 4140, 4146, 4147, 4148, 4149, 4150, 4153, 4154, 4155, 4156, 4157, 4158, 4159, 4160, 4161, 4162, 4163, 4164, 4165, 4166, 4168, 4169, 4170, 4171, 4172, 4174, 4175, 4177, 4178, 4179, 4180, 4181, 4184, 
4186, 4188, 4189, 4190, 4191, 4193, 4194, 4195, 4196, 4197, 4198, 4199, 4200, 4201, 4202, 4203, 4205, 4206, 4207, 4208, 4209, 4210, 4211, 4212, 4213, 4214, 4216, 4218, 4219, 4220, 4221, 4222, 4223, 4224, 4225, 4226, 4227, 4228, 4230, 4231, 4232, 4233, 4234, 4235, 4236, 4237, 4238, 4239, 4240, 4241, 4242, 4243, 4246, 4247, 4248, 4249, 4250, 4252, 4253, 4255, 4257, 4258, 4259, 4260, 4261, 4262, 4263, 4264, 4265, 4266, 4267, 4268, 4269, 4270, 4271, 4272, 4273, 4274, 4275, 4276, 4277, 4278, 4279, 4281, 4282, 4283, 4284, 4285, 4288, 4289, 4290, 4291, 4292, 4294, 4295, 4296, 4297, 4298, 4300, 4301, 4303, 4305, 4306, 4307, 4308, 4309, 4310, 4311, 4312, 4313, 4314, 4315, 4316, 4317, 4318, 4319, 4320, 4321, 4322, 4323, 4324, 4325, 4326, 4327, 4329, 4330, 4331, 4332, 4333, 4334, 4335, 4339, 4340, 4341, 4342, 4343, 4345, 4346, 4347, 4349, 4350, 4352, 4353, 4355, 4356, 4357, 4358, 4359, 4360, 4361, 4362, 4363, 4364, 4365, 4366, 4367, 4368, 4370, 4371, 4372, 4373, 4374, 4376, 4377, 4378, 4379, 4380, 4381, 4382, 4383, 4384, 4385, 4387, 4388, 4389, 4390, 4391, 4392, 4393, 4394, 4395, 4396, 4397, 4398, 4400, 4402, 4403, 4404, 4405, 4406, 4407, 4408, 4409, 4411, 4412, 4413, 4414, 4415, 4416, 4417, 4418, 4419, 4420, 4421, 4422, 4423, 4424, 4425, 4426, 4427, 4428, 4429, 4430, 4431, 4432, 4433, 4434, 4435, 4436, 4437, 4438, 4439, 4440, 4442, 4444, 4445, 4446, 4447, 4449, 4450, 4451, 4452, 4453, 4454, 4455, 4456, 4457, 4458, 4462, 4463, 4464, 4465, 4466, 4467, 4469, 4470, 4471, 4472, 4473, 4474, 4475, 4476, 4477, 4478, 4479, 4480, 4481, 4482, 4483, 4484, 4485, 4486, 4487, 4488, 4489, 4490, 4491, 4492, 4493, 4494, 4495, 4496, 4497, 4498, 4499, 4500, 4501, 4502, 4505, 4506, 4507, 4508, 4509, 4510, 4511, 4512, 4513, 4514, 4515, 4516, 4517, 4518, 4519, 4520, 4521, 4522, 4523, 4524, 4525, 4526, 4527, 4528, 4529, 4530, 4531, 4532, 4533, 4534, 4535, 4536, 4537, 4538, 4539, 4540, 4541, 4542, 4543, 4544, 4545, 4546, 4547, 4548, 4552, 4553, 4554, 4555, 4559, 4561, 4562, 4563, 4565, 4566, 4567, 
4568, 4569, 4570, 4572, 4574, 4575, 4576, 4577, 4578, 4579, 4580, 4581, 4582, 4583, 4584, 4585, 4586, 4587, 4588, 4589, 4590, 4591, 4593, 4594, 4595, 4596, 4597, 4598, 4599, 4601, 4603, 4604, 4606, 4607, 4608, 4609, 4610, 4611, 4612, 4613, 4615, 4616, 4617, 4618, 4619, 4622, 4623, 4624, 4626, 4627, 4629, 4630, 4631, 4632, 4633, 4634, 4635, 4636, 4637, 4638, 4639, 4640, 4641, 4644, 4645, 4646, 4647, 4648, 4649, 4650, 4651, 4652, 4654, 4655, 4656, 4657, 4659, 4660, 4661, 4662, 4663, 4666, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4675, 4676, 4678, 4679, 4680, 4681, 4682, 4683, 4684, 4685, 4686, 4687, 4688, 4689, 4690, 4691, 4692, 4693, 4694, 4695, 4696, 4697, 4699, 4700, 4701, 4702, 4703, 4704, 4705, 4706, 4707, 4708, 4709, 4710, 4713, 4716, 4717, 4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4731, 4732, 4733, 4734, 4735, 4736, 4737, 4738, 4739, 4740, 4741, 4742, 4743, 4744, 4748, 4749, 4750, 4753, 4754, 4755, 4756, 4757, 4759, 4761, 4763, 4764, 4765, 4766, 4767, 4768, 4769, 4770, 4771, 4772, 4773, 4774, 4775, 4776, 4777, 4778, 4779, 4780, 4781, 4782, 4783, 4784, 4785, 4786, 4787, 4788, 4789, 4790, 4795, 4796, 4797, 4798, 4799, 4800, 4801, 4802, 4803, 4804, 4805, 4806, 4807, 4808, 4809, 4810, 4811, 4812, 4815, 4816, 4817, 4818, 4819, 4820, 4821, 4822, 4823, 4825, 4826, 4827, 4829, 4830, 4831, 4833, 4834, 4835, 4836, 4837, 4838, 4840, 4841, 4842, 4843, 4844, 4846, 4847, 4848, 4849, 4850, 4851, 4852, 4853, 4854, 4855, 4856, 4857, 4858, 4859, 4860, 4861, 4862, 4865, 4866, 4867, 4869, 4870, 4872, 4873, 4874, 4875, 4876, 4877, 4878, 4881, 4882, 4885, 4886, 4888, 4890, 4891, 4892, 4893, 4894, 4896, 4897, 4898, 4900, 4901, 4904, 4905, 4906, 4907, 4909, 4910, 4911, 4912, 4913, 4914, 4915, 4916, 4917, 4918, 4919, 4920, 4921, 4923, 4924, 4925, 4926, 4927, 4928, 4929, 4930, 4931, 4932, 4933, 4934, 4935, 4936, 4937, 4938, 4939, 4940, 4941, 4942, 4943, 4944, 4945, 4946, 4947, 4948, 4949, 4950, 4951, 4953, 4954, 4955, 4956, 4957, 4958, 4960, 4963, 
4964, 4965, 4966, 4967, 4968, 4969, 4970, 4971, 4972, 4973, 4974, 4977, 4978, 4979, 4981, 4982, 4983, 4984, 4985, 4988, 4989, 4993, 4994, 4995, 4996, 4997, 4998, 4999, 5000, 5001, 5002, 5003, 5004, 5005, 5006, 5010, 5011, 5016, 5019, 5020, 5021, 5022, 5023, 5024, 5025, 5026, 5027, 5028, 5029, 5030, 5031, 5032, 5033, 5034, 5035, 5036, 5037, 5038, 5039, 5040, 5041, 5044, 5045, 5046, 5047, 5048, 5049, 5051, 5052, 5053, 5054, 5055, 5057, 5058, 5059, 5060, 5061, 5062, 5063, 5064, 5065, 5066, 5067, 5068, 5069, 5070, 5071, 5072, 5073, 5074, 5075, 5076, 5077, 5080, 5081, 5082, 5083, 5084, 5085, 5086, 5087, 5088, 5090, 5091, 5092, 5093, 5094, 5095, 5096, 5097, 5098, 5099, 5100, 5101, 5102, 5103, 5104, 5105, 5106, 5107, 5108, 5113, 5115, 5118, 5120, 5123, 5124, 5126, 5127, 5128, 5129, 5130, 5134, 5135, 5136, 5137, 5138, 5139, 5140, 5141, 5142, 5143, 5144, 5145, 5146, 5147, 5148, 5149, 5150, 5151, 5152, 5153, 5154, 5155, 5156, 5157, 5158, 5162, 5163, 5164, 5165, 5166, 5167, 5168, 5170, 5171, 5172, 5173, 5174, 5175, 5176, 5177, 5178, 5179, 5180, 5181, 5182, 5183, 5185, 5186, 5187, 5188, 5189, 5191, 5192, 5193, 5194, 5195, 5196, 5197, 5198, 5199, 5200, 5201, 5202, 5203, 5204, 5205, 5206, 5208, 5210, 5211, 5212, 5213, 5214, 5215, 5216, 5217, 5218, 5221, 5222, 5223, 5224, 5225, 5226, 5227, 5228, 5229, 5230, 5231, 5232, 5233, 5234, 5235, 5237, 5240, 5241, 5244, 5245, 5246, 5247, 5248, 5249, 5250, 5251, 5252, 5253, 5254, 5255, 5256, 5257, 5258, 5260, 5261, 5262, 5263, 5264, 5265, 5266, 5267, 5268, 5269, 5270, 5271, 5272, 5273, 5278, 5280, 5281, 5282, 5283, 5284, 5285, 5286, 5287, 5288, 5289, 5290, 5291, 5293, 5294, 5302, 5303, 5304, 5305, 5306, 5307, 5308, 5309, 5310, 5311, 5312, 5313, 5314, 5315, 5316, 5317, 5318, 5319, 5320, 5321, 5322, 5323, 5324, 5325, 5326, 5327, 5328, 5338, 5340, 5342, 5343, 5344, 5345, 5346, 5347, 5348, 5349, 5350, 5351, 5352, 5353, 5354, 5356, 5357, 5358, 5360, 5361, 5362, 5363, 5364, 5365, 5366, 5367, 5369, 5370, 5371, 5372, 5373, 5374, 5375, 5376, 5377, 
5378, 5379, 5380, 5381, 5382, 5383, 5384, 5386, 5389, 5390, 5393, 5394, 5395, 5396, 5397, 5399, 5400, 5401, 5402, 5404, 5405, 5408, 5412, 5413, 5414, 5415, 5416, 5417, 5418, 5419, 5421, 5422, 5423, 5424, 5425, 5426, 5427, 5429, 5430, 5431, 5432, 5433, 5434, 5435, 5436, 5439, 5441, 5442, 5443, 5444, 5445, 5446, 5447, 5449, 5450, 5451, 5452, 5453, 5454, 5455, 5456, 5457, 5458, 5459, 5460, 5461, 5462, 5463, 5464, 5465, 5466, 5467, 5470, 5471, 5472, 5473, 5475, 5476, 5477, 5478, 5479, 5480, 5483, 5484, 5485, 5486, 5487, 5488, 5489, 5490, 5491, 5492, 5493, 5494, 5496, 5497, 5499, 5500, 5501, 5502, 5503, 5504, 5505, 5506, 5507, 5508, 5509, 5510, 5511, 5512, 5513, 5514, 5515, 5516, 5517, 5518, 5520, 5521, 5522, 5523, 5524, 5525, 5526, 5527, 5528, 5529, 5530, 5531, 5532, 5533, 5534, 5535, 5536, 5537, 5538, 5539, 5540, 5541, 5542, 5543, 5544, 5546, 5547, 5548, 5549, 5550, 5551, 5552, 5553, 5554, 5555, 5556, 5557, 5558, 5559, 5560, 5561, 5563, 5564, 5565, 5566, 5567, 5568, 5569, 5571, 5573, 5574, 5576, 5578, 5579, 5580, 5581, 5582, 5583, 5584, 5589, 5590, 5591, 5593, 5594, 5595, 5596, 5597, 5598, 5599, 5600, 5601, 5602, 5603, 5604, 5605, 5606, 5607, 5608, 5609, 5611, 5612, 5613, 5614, 5616, 5617, 5618, 5619, 5620, 5622, 5623, 5624, 5625, 5626, 5627, 5628, 5629, 5630, 5631, 5632, 5633, 5634, 5635, 5636, 5637, 5638, 5639, 5640, 5641, 5642, 5643, 5644, 5646, 5647, 5648, 5649, 5650, 5651, 5652, 5653, 5654, 5655, 5656, 5657, 5658, 5659, 5660, 5661, 5662, 5663, 5664, 5665, 5666, 5667, 5668, 5669, 5670, 5671, 5673, 5674, 5675, 5676, 5677, 5678, 5679, 5680, 5681, 5682, 5683, 5684, 5685, 5687, 5688, 5689, 5690, 5691, 5692, 5693, 5700, 5701, 5702, 5703, 5704, 5705, 5706, 5708, 5709, 5710, 5711, 5712, 5713, 5714, 5717, 5718, 5719, 5720, 5721, 5722, 5723, 5724, 5725, 5726, 5727, 5728, 5734, 5736, 5737, 5738, 5739, 5740, 5741, 5742, 5743, 5744, 5745, 5746, 5747, 5748, 5749, 5750, 5751, 5752, 5753, 5754, 5755, 5756, 5757, 5758, 5759, 5760, 5761, 5762, 5763, 5764, 5765, 5766, 5767, 5768, 
5769, 5771, 5772, 5776, 5777, 5778, 5779, 5780, 5781, 5782, 5783, 5784, 5785, 5786, 5787, 5788, 5789, 5790, 5791, 5792, 5794, 5795, 5796, 5797, 5798, 5799, 5801, 5802, 5804, 5807, 5808, 5809, 5813, 5814, 5815, 5816, 5819, 5821, 5822, 5823, 5824, 5825, 5826, 5827, 5828, 5829, 5832, 5834, 5835, 5836, 5837, 5839, 5840, 5841, 5842, 5843, 5844, 5845, 5846, 5847, 5848, 5850, 5851, 5852, 5853, 5856, 5859, 5862, 5863, 5864, 5865]
5
+
6
+
7
+ unanswerable_keyphrases = ["cannot find", "can't find", "not able to", "unable to", "does not provide", "cannot provide", "cannot answer", "couldnot answer", "can't answer", "couldn't answer", "cannot be found", "cannot be determined", "do not have", "couldn't find", "no information", "does not mention", "doesn't mention", "not explicitly mentioned", "can not find", "could not find", "does not specify", "doesn't provide", "doesn't specify", "there is no", "not mentioned", "don't have", "don't know"]
evaluation/get_scores.py ADDED
@@ -0,0 +1,363 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ from evaluation_utils import quac_correct_retrieved_instance_idx_list
3
+ from evaluation_utils import unanswerable_keyphrases
4
+ import json
5
+ from metrics import F1Metric
6
+ import copy
7
+ import re
8
+
9
+
10
def compute_f1_score(predicted_answers, groundtruth_answer, exp_name="default"):
    """Print token-level precision/recall/F1 for a batch of predictions."""
    print(len(predicted_answers), len(groundtruth_answer))
    ## if generation stopped early, score only the predictions we have
    if len(predicted_answers) != len(groundtruth_answer):
        groundtruth_answer = groundtruth_answer[:len(predicted_answers)]

    ## strip whitespace and any stray end-of-sequence markers from guesses
    guess_list = [guess.strip().replace("</s>", "") for guess in predicted_answers]
    answer_list = list(groundtruth_answer)

    assert len(guess_list) == len(answer_list), \
        "lengths of guess and answer are different!"

    precision, recall, f1 = F1Metric.compute_all_pairs(guess_list, answer_list)
    print('Method: %s; Precision: %.4f; recall: %.4f; f1: %.4f' % (\
        exp_name, precision, recall, f1))
33
+
34
+
35
def load_groundtruth_file(data_file):
    """Load gold answers from a test JSON file.

    Each instance must carry either an "answers" list or a single "answer"
    field (str, list, or a scalar such as a number).

    Args:
        data_file: path to a JSON file containing a list of instances.

    Returns:
        A list of answer-lists, one per instance.

    Raises:
        ValueError: if an instance has neither "answers" nor "answer".
    """
    with open(data_file, "r") as f:
        examples = json.load(f)

    data = []
    for instance in examples:
        if "answers" in instance:
            answers = instance["answers"]
        elif "answer" in instance:
            answer = instance["answer"]
            ## isinstance instead of `type(...) is`: idiomatic and subclass-safe
            if isinstance(answer, str):
                answers = [answer]
            elif isinstance(answer, list):
                answers = answer
            else:
                ## scalar answers (numbers, booleans) are stringified
                answers = [str(answer)]
        else:
            raise ValueError("need to have answer or answers")
        data.append(answers)

    return data
56
+
57
+
58
def load_prediction(data_file):
    """Read one generated answer per line, stripped of surrounding whitespace."""
    with open(data_file, "r") as f:
        return [line.strip() for line in f]
66
+
67
+
68
def evaluate_f1(ground_truth_file, prediction_file):
    """Score one dataset's generated outputs against its gold answers with F1."""
    groundtruth_answers = load_groundtruth_file(ground_truth_file)
    if "inscit" in ground_truth_file:
        ## the canned no-answer response was additionally appended to every
        ## inscit answer list during data prep; strip it back out here
        filtered = []
        for answers in groundtruth_answers:
            kept = [ans for ans in answers if ans != "Sorry. I cannot find the answer based on the context."]
            assert len(kept) > 0
            filtered.append(kept)
        groundtruth_answers = filtered

    predicted_answers = load_prediction(prediction_file)
    if "quac" in prediction_file or "doqa" in prediction_file:
        ## normalize any refusal phrasing to the canonical no-answer response
        ## (predictions are lowercased as part of this normalization)
        def _canonical(pred):
            pred = pred.lower()
            for keyphrase in unanswerable_keyphrases:
                if keyphrase in pred:
                    return "Sorry. I cannot find the answer based on the context."
            return pred
        predicted_answers = [_canonical(pred) for pred in predicted_answers]

    compute_f1_score(predicted_answers, groundtruth_answers)
96
+
97
+
98
def separate_cannot_answer(ground_truth_file, prediction_file):
    """Split instances into unanswerable and answerable index lists.

    Returns (predicted_answers, cannot_answer_idx_list, answerable_idx_list),
    where predictions have been canonicalized to the no-answer response when
    they contain a refusal keyphrase.
    """
    # load ground truth
    with open(ground_truth_file, "r") as f:
        groundtruth_answers = json.load(f)
    # load prediction
    predicted_answers = load_prediction(prediction_file)
    print(len(predicted_answers), len(groundtruth_answers))
    if len(predicted_answers) != len(groundtruth_answers):
        groundtruth_answers = groundtruth_answers[:len(predicted_answers)]

    if "quac" in prediction_file:
        ## For answerable cases we want the retrieved context list to contain
        ## the gold chunk. For QuAC, top-5 retrieved contexts are used as
        ## inputs; quac_correct_retrieved_instance_idx_list indexes the
        ## instances whose top-5 contexts contain the gold answer.
        answerable_instance_idx_list = quac_correct_retrieved_instance_idx_list
    else:
        answerable_instance_idx_list = None

    noanswer_response = "Sorry. I cannot find the answer based on the context."

    ## canonicalize refusal phrasings (also lowercases every prediction)
    canonical = []
    for pred in predicted_answers:
        pred = pred.lower()
        for keyphrase in unanswerable_keyphrases:
            if keyphrase in pred:
                pred = noanswer_response
                break
        canonical.append(pred)
    predicted_answers = canonical

    cannot_answer_idx_list = []
    answerable_idx_list = []
    count_idx = 0  # position among answerable-gold instances (quac filter key)
    for idx, item in enumerate(groundtruth_answers):
        answer = item["answers"][0] if "answers" in item else item["answer"]

        if answer == noanswer_response:
            cannot_answer_idx_list.append(idx)
            continue

        if answerable_instance_idx_list:
            ## quac: keep only instances whose retrieval contains the gold chunk
            if count_idx in answerable_instance_idx_list:
                answerable_idx_list.append(idx)
            count_idx += 1
        else:
            answerable_idx_list.append(idx)

    print("number of cannot answer cases: %d (out of %d)" % (len(cannot_answer_idx_list), len(groundtruth_answers)))
    print("number of answerable cases: %d (out of %d)" % (len(answerable_idx_list), len(groundtruth_answers)))

    return predicted_answers, cannot_answer_idx_list, answerable_idx_list
154
+
155
+
156
def get_cannot_answer_and_answerable_acc(predicted_answers, cannot_answer_idx_list, answerable_idx_list):
    """Print (and return) accuracy on unanswerable and answerable subsets.

    A prediction counts as a refusal when it contains both "sorry" and
    "cannot find the answer" (case-insensitive).

    Args:
        predicted_answers: list of prediction strings, indexable by the lists below.
        cannot_answer_idx_list: indices whose gold answer is the no-answer response.
        answerable_idx_list: indices of answerable instances.

    Returns:
        (cannot_answer_acc, answerable_acc) as floats. An empty index list
        yields 0.0 for that accuracy instead of raising ZeroDivisionError
        (the original crashed on datasets with no unanswerable turns).
    """
    def _is_noanswer(prediction):
        ## keyphrase test mirroring the canonical no-answer response
        prediction = prediction.lower()
        return "sorry" in prediction and "cannot find the answer" in prediction

    # cannot answer
    noanswer_count = sum(1 for idx in cannot_answer_idx_list if _is_noanswer(predicted_answers[idx]))
    cannot_answer_acc = noanswer_count / len(cannot_answer_idx_list) if cannot_answer_idx_list else 0.0
    print("accuracy of cannot answer cases: %.4f" % cannot_answer_acc)

    # answerable
    answerable_count = sum(1 for idx in answerable_idx_list if not _is_noanswer(predicted_answers[idx]))
    answerable_acc = answerable_count / len(answerable_idx_list) if answerable_idx_list else 0.0
    print("accuracy of answerable cases: %.4f" % answerable_acc)

    return cannot_answer_acc, answerable_acc
180
+
181
+
182
def evaluate_cannot_answer_acc(ground_truth_file, prediction_file):
    """Report no-answer detection accuracy for one dataset."""
    split = separate_cannot_answer(ground_truth_file, prediction_file)
    get_cannot_answer_and_answerable_acc(*split)
187
+
188
+
189
def evaluate_convfinqa(ground_truth_file, prediction_file):
    """Exact-match scoring for ConvFinQA.

    The model gives a long free-form answer, while ConvFinQA gold answers are
    either an arithmetic formula or a final executed number; an output that
    contains either is counted as correct.

    Fixes over the original:
      * `gold < 0`, `int(gold)` and `float(gold)*100` raised TypeError/ValueError
        when `exe_answer` was a string — all numeric comparisons now go through
        a single `float(gold)` conversion guarded by `_is_float`.
      * empty prediction files no longer raise ZeroDivisionError.
      * the accuracy is returned (previously only printed); callers that
        ignored the None return are unaffected.
    """

    def _is_float(string):
        ## also guards against non-str/non-number inputs (TypeError)
        try:
            float(string)
            return True
        except (ValueError, TypeError):
            return False

    with open(ground_truth_file, "r") as f:
        gold_list = json.load(f)

    groundtruth_answers = [item['exe_answer'] for item in gold_list]
    groundtruth_answers_formula = [item['answers'][0] for item in gold_list]

    ## last turn question_list
    question_list = [item['messages'][-1]['content'] for item in gold_list]
    predicted_answers = load_prediction(prediction_file)

    print(len(predicted_answers), len(groundtruth_answers))
    if len(predicted_answers) != len(groundtruth_answers):
        groundtruth_answers = groundtruth_answers[:len(predicted_answers)]

    if not predicted_answers:
        print("accuracy of exact match: 0.0000")
        return 0.0

    count_exact_match = 0
    for question, pred, gold, gold_formula in zip(question_list, predicted_answers, groundtruth_answers, groundtruth_answers_formula):

        ## convert 1,000,000 into 1000000
        normalized = pred.replace(",", "")
        ## convert $10 million + $20 million into 10 + 20
        normalized = normalized.replace("$", "").replace("million", "").replace("billion", "")
        ## convert 10 (2017) + 20 (2018) into 10 + 20
        normalized = re.sub(r'\((\b\w+\b)\)', '', normalized)
        ## make sure each token only has one space in between
        normalized = " ".join(normalized.split())

        gold_is_float = _is_float(gold)
        gold_float = float(gold) if gold_is_float else None

        if str(gold) in normalized:
            count_exact_match += 1

        elif str(gold_formula) in normalized:
            count_exact_match += 1

        elif gold_is_float and (str(round(gold_float, 3)) in normalized or str(round(gold_float, 2)) in normalized):
            count_exact_match += 1

        elif "percent" in question and gold_is_float and (str(gold_float * 100) in normalized or str(round(gold_float * 100, 1)) in normalized or str(round(gold_float * 100, 2)) in normalized):
            count_exact_match += 1

        elif gold_is_float and str(gold).endswith(".0") and str(int(gold_float)) in normalized:
            ## gold is an integer like 80.0, so also accept "80"
            count_exact_match += 1

        elif "decrease" in normalized and gold_is_float and gold_float < 0 and str(-1 * gold_float) in normalized:
            ## model says e.g. "a decrease of 10 million" while gold is -10
            count_exact_match += 1

    accuracy = count_exact_match / len(predicted_answers)
    print("accuracy of exact match: %.4f" % accuracy)
    return accuracy
256
+
257
+
258
def main():
    """Score every benchmark's generated outputs against its test set.

    Replace the placeholder paths below with real file locations before
    running, e.g. outputs/doc2dial_output.txt and data/doc2dial/test.json.
    """

    def _score(prediction_file, ground_truth_file, scorer=evaluate_f1, check_noanswer=False):
        ## shared reporting wrapper: separator, the two paths, then the scorer
        print("-"*80)
        print(prediction_file)
        print(ground_truth_file)
        scorer(ground_truth_file, prediction_file)
        if check_noanswer:
            evaluate_cannot_answer_acc(ground_truth_file, prediction_file)

    ## doc2dial
    _score("PATH_TO_THE_GENERATED_OUTPUT", "PATH_TO_THE_TEST_DATA")

    ## quac (also scored on unanswerable detection)
    _score("PATH_TO_THE_GENERATED_OUTPUT", "PATH_TO_THE_TEST_DATA", check_noanswer=True)

    ## qrecc
    _score("PATH_TO_THE_GENERATED_OUTPUT", "PATH_TO_THE_TEST_DATA")

    ## topiocqa
    _score("PATH_TO_THE_GENERATED_OUTPUT", "PATH_TO_THE_TEST_DATA")

    ## inscit
    _score("PATH_TO_THE_GENERATED_OUTPUT", "PATH_TO_THE_TEST_DATA")

    ## coqa
    _score("PATH_TO_THE_GENERATED_OUTPUT", "PATH_TO_THE_TEST_DATA")

    ## hybridial
    _score("PATH_TO_THE_GENERATED_OUTPUT", "PATH_TO_THE_TEST_DATA")

    ## sqa
    _score("PATH_TO_THE_GENERATED_OUTPUT", "PATH_TO_THE_TEST_DATA")

    ## doqa_cooking
    _score("PATH_TO_THE_GENERATED_OUTPUT", "PATH_TO_THE_TEST_DATA", check_noanswer=True)

    ## doqa_travel
    _score("PATH_TO_THE_GENERATED_OUTPUT", "PATH_TO_THE_TEST_DATA", check_noanswer=True)

    ## doqa_movies
    _score("PATH_TO_THE_GENERATED_OUTPUT", "PATH_TO_THE_TEST_DATA", check_noanswer=True)

    ## convfinqa (arithmetic exact-match instead of token F1)
    _score("PATH_TO_THE_GENERATED_OUTPUT", "PATH_TO_THE_TEST_DATA", scorer=evaluate_convfinqa)


if __name__ == "__main__":
    main()
363
+
evaluation/metrics.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # The following code is adapted from
3
+ # https://github.com/facebookresearch/ParlAI/blob/master/parlai/core/metrics.py,
4
+ # which is licensed under the MIT license. More details on the license can be
5
+ # found at https://github.com/facebookresearch/ParlAI/blob/master/LICENSE.
6
+
7
+ """Provides standard metric evaluations for dialog."""
8
+
9
+ from collections import Counter
10
+ from typing import List
11
+ import numpy as np
12
+ import re
13
+
14
## compiled once at module level: articles and punctuation to strip
re_art = re.compile(r'\b(a|an|the)\b')
re_punc = re.compile(r'[!"#$%&()*+,-./:;<=>?@\[\]\\^`{|}~_\']')


def normalize_answer(s):
    """
    Lower text and remove punctuation, articles and extra whitespace.
    """
    s = s.lower()
    s = re_punc.sub(' ', s)
    s = re_art.sub(' ', s)
    s = ' '.join(s.split())
    return s


class F1Metric:
    """
    Helper class which computes token-level F1.
    """

    @staticmethod
    def _prec_recall_f1_score(pred_items, gold_items):
        """
        Compute precision, recall and f1 given a set of gold and prediction items.
        :param pred_items: iterable of predicted values
        :param gold_items: iterable of gold values
        :return: tuple (p, r, f1) for precision, recall, f1
        """
        common = Counter(gold_items) & Counter(pred_items)
        num_same = sum(common.values())
        if num_same == 0:
            return 0, 0, 0
        precision = 1.0 * num_same / len(pred_items)
        recall = 1.0 * num_same / len(gold_items)
        f1 = (2 * precision * recall) / (precision + recall)
        return precision, recall, f1

    @staticmethod
    def compute_each_pair(guess: str, answer: str):
        """Score one guess/answer pair on normalized tokens.

        Returns (None, None, None) for an empty gold answer (pair should be
        skipped) and (0, 0, 0) for an empty guess.
        """
        if answer == "":
            return None, None, None
        if guess == "":
            return 0, 0, 0
        g_tokens = normalize_answer(guess).split()
        a_tokens = normalize_answer(answer).split()
        return F1Metric._prec_recall_f1_score(g_tokens, a_tokens)

    @staticmethod
    def compute_all_pairs(guesses: List[str], answers: List[list]):
        """Average precision/recall/F1 over all guesses.

        Each guess is scored against its best-matching (highest-F1)
        reference answer.

        BUG FIX: previously f1 was max()-ed over the references while
        precision/recall were taken from the *last* reference, yielding a
        mismatched (p, r, f1) triple — and a potential NameError when every
        reference of the first guess was empty. We now keep the full triple
        of the best reference.
        """
        assert len(guesses) == len(answers)
        precision_list, recall_list, f1_list = [], [], []
        for guess, answer in zip(guesses, answers):
            assert isinstance(answer, list)
            best = None  # (precision, recall, f1) of the best reference so far
            for answer_each in answer:
                answer_each = answer_each.strip()
                if answer_each == "":
                    continue
                precision, recall, f1 = F1Metric.compute_each_pair(guess, answer_each)
                if f1 is None:
                    continue
                if best is None or f1 > best[2]:
                    best = (precision, recall, f1)
            if best is not None:
                precision_list.append(best[0])
                recall_list.append(best[1])
                f1_list.append(best[2])

        return np.mean(precision_list), np.mean(recall_list), np.mean(f1_list)
evaluation/run_generation_vllm.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+ from transformers import AutoTokenizer
4
+ from vllm import LLM, SamplingParams
5
+ from arguments import get_args
6
+ from dataset import load_data, get_inputs
7
+ import torch
8
+ import os
9
+
10
def get_prompt_list(args):
    """Build the formatted prompt list for the requested eval dataset."""

    ## tokenizer matching the target model
    tokenizer = AutoTokenizer.from_pretrained(args.model_id)

    ## dispatch table: dataset name -> the args attribute holding its path
    ## (looked up lazily with getattr so only the chosen attribute is read)
    path_attr_by_dataset = {
        "doc2dial": "doc2dial_path",
        "convfinqa": "convfinqa_path",
        "quac": "quac_path",
        "qrecc": "qrecc_path",
        "doqa_cooking": "doqa_cooking_path",
        "doqa_travel": "doqa_travel_path",
        "doqa_movies": "doqa_movies_path",
        "coqa": "coqa_path",
        "sqa": "sqa_path",
        "topiocqa": "topiocqa_path",
        "inscit": "inscit_path",
        "hybridial": "hybridial_path",
    }
    if args.eval_dataset not in path_attr_by_dataset:
        raise Exception("please input a correct eval_dataset name!")
    input_datapath = os.path.join(args.data_folder, getattr(args, path_attr_by_dataset[args.eval_dataset]))

    data_list = load_data(input_datapath)
    print("number of samples in the dataset:", len(data_list))
    prompt_list = get_inputs(data_list, args.eval_dataset, tokenizer, num_ctx=args.num_ctx, max_output_len=args.out_seq_len)

    return prompt_list
49
+
50
+
51
def main():
    """Generate answers with vLLM for the selected eval dataset and write
    them (one per line) to <output_folder>/<eval_dataset>_output.txt."""
    args = get_args()

    ## bos token for llama-3
    bos_token = "<|begin_of_text|>"

    ## FIX: the original read args.model_folder / args.model_name, which
    ## arguments.py never defines (it defines --model-id, which the README
    ## documents and get_prompt_list already uses for the tokenizer). Using
    ## the same identifier here keeps tokenizer and weights in sync.
    model_path = args.model_id

    ## get prompt_list
    prompt_list = get_prompt_list(args)

    ## get output_datapath
    output_datapath = os.path.join(args.output_folder, "%s_output.txt" % args.eval_dataset)

    ## greedy decoding
    sampling_params = SamplingParams(temperature=0, top_k=1, max_tokens=args.max_tokens)

    ## This changes the GPU support to 8
    model_vllm = LLM(model_path, tensor_parallel_size=8)

    ## batch all prompts in a single call (vLLM schedules them together and
    ## returns outputs in input order) instead of one generate() per prompt
    outputs = model_vllm.generate([bos_token + prompt for prompt in prompt_list], sampling_params)

    output_list = []
    for output in outputs:
        generated_text = output.outputs[0].text
        ## predictions are written one per line, so flatten newlines
        output_list.append(generated_text.strip().replace("\n", " "))

    print("writing to %s" % output_datapath)
    with open(output_datapath, "w") as f:
        for output in output_list:
            f.write(output + "\n")


if __name__ == "__main__":
    main()