{
"paper_id": "2019",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T07:30:14.810510Z"
},
"title": "Non-native Accent Partitioning for Speakers of Indian Regional Languages",
"authors": [
{
"first": "G",
"middle": [],
"last": "Radha Krishna",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "VNRVJIET Hyderabad",
"location": {
"country": "India"
}
},
"email": ""
},
{
"first": "R",
"middle": [],
"last": "Krishnan",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Amritha University Coimbattore",
"location": {
"country": "India"
}
},
"email": ""
},
{
"first": "V",
"middle": [
"K"
],
"last": "Mittal",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "K L University",
"location": {
"settlement": "Vijayawada",
"country": "India"
}
},
"email": ""
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "Acoustic features extracted from the speech signal can help in identifying speaker related multiple information such as geographical origin, regional accent and nativity. In this paper, classification of native speakers of South Indian languages is carried out based upon the accent of their non-native language, i.e., English. Four South Indian languages: Kannada, Malayalam, Tamil, and Telugu are examined. A database of English speech from the native speakers of these languages, along with the native language speech data was collected, from a non-overlapping set of speakers. Segment level acoustic features Mel-frequency cepstral coefficients (MFCCs) and F 0 are used. Accent partitioning of non-native English speech data is carried out using multiple classifiers: k-nearest neighbour (KNN), linear discriminant analysis (LDA) and support vector machine (SVM), for validation and comparison of results. Classification accuracies of 86.6% are observed using KNN, and 89.2% or more than 90% using SVM classifier. A study of acoustic feature F 0 contour, related to L 2 intonation, showed that native speakers of Kannada language are quite distinct as compared to those of Tamil or Telugu languages. It is also observed that identification of Malayalam and Kannada speakers from their English speech accent is relatively easier than Telugu or Tamil speakers.",
"pdf_parse": {
"paper_id": "2019",
"_pdf_hash": "",
"abstract": [
{
"text": "Acoustic features extracted from the speech signal can help in identifying speaker related multiple information such as geographical origin, regional accent and nativity. In this paper, classification of native speakers of South Indian languages is carried out based upon the accent of their non-native language, i.e., English. Four South Indian languages: Kannada, Malayalam, Tamil, and Telugu are examined. A database of English speech from the native speakers of these languages, along with the native language speech data was collected, from a non-overlapping set of speakers. Segment level acoustic features Mel-frequency cepstral coefficients (MFCCs) and F 0 are used. Accent partitioning of non-native English speech data is carried out using multiple classifiers: k-nearest neighbour (KNN), linear discriminant analysis (LDA) and support vector machine (SVM), for validation and comparison of results. Classification accuracies of 86.6% are observed using KNN, and 89.2% or more than 90% using SVM classifier. A study of acoustic feature F 0 contour, related to L 2 intonation, showed that native speakers of Kannada language are quite distinct as compared to those of Tamil or Telugu languages. It is also observed that identification of Malayalam and Kannada speakers from their English speech accent is relatively easier than Telugu or Tamil speakers.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Identification of speakers, classification of their dialectal zones is important in a multilingual country like India (Bhattacharjee and Sarmah, 2012) . Speaker uniqueness is manifested in both anatomical and learned traits. When the context is constrained, speaker characteristics can be used reliably to identify individuals (Arslan and Hansen, 1996) . The accent is one of the glaring indications of linguistic and social background of a speaker. Studying the characteristics of dialect on a phonetic or phonemic level belongs to accent recognition . Earlier studies have concluded that native language (L 1 ) affects the speaker's traits of their second language (L 2 ) (Ghorbani et al., 2018; Graham and Post, 2018) . Analysis and classification of utterances that belong to specific groups of learners is the main objective of Native Language Identification (NLI) (Nisioi, 2015) . However, there is very little research on the question of accuracy with which accent features can be used to identify a speaker's regional or ethnic origin (Harper and Maxwell, 2008) . A solution to the problem of regional accent classification across English speaking South Indians is attempted in the present research, using a specifically developed corpus.",
"cite_spans": [
{
"start": 118,
"end": 150,
"text": "(Bhattacharjee and Sarmah, 2012)",
"ref_id": "BIBREF3"
},
{
"start": 327,
"end": 352,
"text": "(Arslan and Hansen, 1996)",
"ref_id": "BIBREF1"
},
{
"start": 674,
"end": 697,
"text": "(Ghorbani et al., 2018;",
"ref_id": "BIBREF12"
},
{
"start": 698,
"end": 720,
"text": "Graham and Post, 2018)",
"ref_id": "BIBREF14"
},
{
"start": 870,
"end": 884,
"text": "(Nisioi, 2015)",
"ref_id": "BIBREF38"
},
{
"start": 1043,
"end": 1069,
"text": "(Harper and Maxwell, 2008)",
"ref_id": "BIBREF17"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Discriminative classifiers based on characterizing acoustic differences across foreign accents can be employed to direct an accent dependent recognition system (Omar and Pelecanos, 2010; Ikeno and Hansen, 2006) . Systems with an automatic evaluation of non-native speech, which includes characteristics of the mother tongue will have better performance over similar algorithms that depend upon target languages (Qian et al., 2017) . This is particularly true when the text uttered is unknown. Native listeners are mostly aware of the speaker's regional accent and also the social or geographical subgroup within the region (Hanani et al., 2013) . Automatic speaker characterization is vital in real-world applications and the advantages are widely open (Zampieri et al., 2017; Krishna and Krishnan, 2014) .",
"cite_spans": [
{
"start": 160,
"end": 186,
"text": "(Omar and Pelecanos, 2010;",
"ref_id": "BIBREF39"
},
{
"start": 187,
"end": 210,
"text": "Ikeno and Hansen, 2006)",
"ref_id": "BIBREF20"
},
{
"start": 411,
"end": 430,
"text": "(Qian et al., 2017)",
"ref_id": null
},
{
"start": 623,
"end": 644,
"text": "(Hanani et al., 2013)",
"ref_id": "BIBREF16"
},
{
"start": 753,
"end": 776,
"text": "(Zampieri et al., 2017;",
"ref_id": null
},
{
"start": 777,
"end": 804,
"text": "Krishna and Krishnan, 2014)",
"ref_id": "BIBREF23"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Pattern recognition approach of collecting data, extracting suitable features, and training classification module using machine learning is a powerful tool in applications like Computer-Assisted-Pronunciation-Training (CAPT) programs. Acoustic descriptors are critical in tasks such as sound Classification (Day and Nandi, 2007) . State-of-the-art Accent Identification (AID) systems widely rely on spectral acoustic distribution for modeling the pronunciation. In applications like accent recognition, features distinguishing different phonemes of a language will be useful . Languagespecific differences in phonological development might be related to differences in phoneme and phoneme sequence frequency across languages (Ikeno and Hansen, 2006) . Such variations are also represented by the intonation patterns of individuals (Mary and Yegnanarayana, 2008; Li et al., 2017) . Apart from cepstral features that capture underlying acoustic characteristics, information from higher-level prosodic traits (Doddington, 2001; MALMASI and DRAS, 2017) were examined in the present study.",
"cite_spans": [
{
"start": 307,
"end": 328,
"text": "(Day and Nandi, 2007)",
"ref_id": "BIBREF7"
},
{
"start": 725,
"end": 749,
"text": "(Ikeno and Hansen, 2006)",
"ref_id": "BIBREF20"
},
{
"start": 831,
"end": 861,
"text": "(Mary and Yegnanarayana, 2008;",
"ref_id": "BIBREF30"
},
{
"start": 862,
"end": 878,
"text": "Li et al., 2017)",
"ref_id": "BIBREF26"
},
{
"start": 1006,
"end": 1024,
"text": "(Doddington, 2001;",
"ref_id": "BIBREF8"
},
{
"start": 1025,
"end": 1048,
"text": "MALMASI and DRAS, 2017)",
"ref_id": "BIBREF29"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "English is the most widely spoken second language in India and elsewhere in the world (Saha and Mandal, 2015; Guntur et al., 2018) . Indian English has several varieties with their specific accents and phonological features and often a distinct lexicon. Research on spoken English of Indian speakers is urgently needed from a multidisciplinary perspective (Cheng et al., 2013; Krishna et al., 2019) . Present work is aimed at comparing the acoustic properties that are likely to differ between English accents different groups of South Indian language of speakers. The nonnative prosodic traits are a hindrance to proficiency in a second language (L 2 ), and also to the mutual understanding. Present work also examines the local prosodic changes in the non-native English speech, without incorporating any phonol-ogy of the specific languages. The ability to compensate against prosodic deviation during English production can be improved by identifying the articulatory gestures that emphasize the non-native speaker accent. The paper is organized as follows: Section 2 presents the details of the database, including the recording methodology. Section 3 describes acoustic and prosodic features used in foreign accent recognition. Section 4 describes the classification procedures employed in the NLI experiments. Section 5 gives the details of the experiments and results. Analysis of results of regional accent classification is given in section 6. Section 7 describes the key outcome and contributions. Conclusions drawn are given in Section 8.",
"cite_spans": [
{
"start": 86,
"end": 109,
"text": "(Saha and Mandal, 2015;",
"ref_id": "BIBREF43"
},
{
"start": 110,
"end": 130,
"text": "Guntur et al., 2018)",
"ref_id": "BIBREF15"
},
{
"start": 356,
"end": 376,
"text": "(Cheng et al., 2013;",
"ref_id": "BIBREF6"
},
{
"start": 377,
"end": 398,
"text": "Krishna et al., 2019)",
"ref_id": "BIBREF24"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The main focus of current research work is on differentiating the regional non-native English accents of speakers, and also describing foreign accent in terms of a common set of fundamental speech attributes. A database has been specifically developed (G.Radha with native and non-native speech samples containing utterance by the speakers belonging to language groups Kannada (KAN), Malayalam (MAL), Tamil (TAM), and Telugu (TEL). Table 2 shows the template of file naming process.",
"cite_spans": [],
"ref_spans": [
{
"start": 432,
"end": 439,
"text": "Table 2",
"ref_id": "TABREF1"
}
],
"eq_spans": [],
"section": "Data Sets of 4 Indian Regional Languages",
"sec_num": "2"
},
{
"text": "Among more than six thousand languages in the world, less than 10% of the languages are spoken by more than 90% of the people. Speakers and learners of the English language constitute a large proportion in countries like India, South Africa, and much of the developing world. India has distinct linguistic communities, each of which shares a common language and culture. English, Hindi and dominant local languages are spoken nonnatively by a large number of Indians. In South Indian cities, many people speak at least two second languages. It would be beneficial if speech based systems can store models of all known languages and carry out the task of NLI automatically. ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Selection of Regional Languages",
"sec_num": "2.1"
},
{
"text": "The details of speech corpus developed for each of the languages is shown in Table 1 . Native speech utterances of 20 speakers from each of the native language groups KAN, TAM, and TEL, each with a duration of 300 seconds formed the training set. English test samples for a duration of 60 seconds were collected from 25 speakers belonging to each of the four groups KAN, MAL, TAM, and TEL. As the sufficient number of native speakers of MAL are not readily available, it is included in the testing set only. The test utterances were recorded under identical conditions as training speech samples and there is no overlap between training and testing sets with respect to speakers and sentences. Each of the test samples is recorded for a duration of 60 seconds. The nonnative English speech samples are collected from a set of speakers with nearly uniform geographical distribution within a region with an educational background of at least graduation, but who do not use English routinely.",
"cite_spans": [],
"ref_spans": [
{
"start": 77,
"end": 84,
"text": "Table 1",
"ref_id": "TABREF0"
}
],
"eq_spans": [],
"section": "Speech Corpus Recording Methodology",
"sec_num": "2.2"
},
{
"text": "Recordings of speakers were made in quiet office room conditions using Logitech h110 microphone and waveforms are sampled at a rate of 16 kHz. The recordings were made in a laboratory environment with written text, with negligible re- Attitudinal, Accentual, Discourse, Grammatical verberation. The participants were asked to read aloud passages of a text from general topics. For applications like screening of non-native speech, read data can be used for both training and testing (Schuller et al., 2013) . It is ensured that Gender weightages are equally distributed in training as well as testing data sets. The speakers in the training set are considered representative of the regional languages KAN, TAM and TEL. However, for testing set speakers of Malayalam were also included. These speakers are so chosen from language heartlands. The speakers in the test set are considered potential users of future systems augmented with automatic Accent Identification (AID) capability.",
"cite_spans": [
{
"start": 483,
"end": 506,
"text": "(Schuller et al., 2013)",
"ref_id": "BIBREF44"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Speech Corpus Recording Methodology",
"sec_num": "2.2"
},
{
"text": "Understanding similar variations in foreign accents is a crucial factor for the development of an NLI system. The dominant articulatory traits of different languages are different (Koreman, 2018) . In applications like accent recognition, features distinguishing different phonemes of a language will be useful (Li et al., 2013) . The acoustic signature or the voice individuality of the speech signal are available as differences in transformations occurring at semantic, linguistic articulatory, and acoustic levels. Out of all the factors affecting speech, accent is a week factor in the sense that speech variation is not as evident as that due to speaker/gender. Language-specific differences in phonological development might be related to differences in phoneme and phoneme sequence frequency across languages (Graham and Post, 2018) . Speakers of the second language (SL) are expected to import certain patterns from their native language (NL) Figure 1 : Front end signal processing for feature extraction which are audible in SL. The influence of the surrounding speech prosody on new-born cry melody has been shown (Monnin and Loevenbruck, 2010) . The non-native speech detection is thus very challenging .",
"cite_spans": [
{
"start": 180,
"end": 195,
"text": "(Koreman, 2018)",
"ref_id": "BIBREF22"
},
{
"start": 311,
"end": 328,
"text": "(Li et al., 2013)",
"ref_id": "BIBREF25"
},
{
"start": 817,
"end": 840,
"text": "(Graham and Post, 2018)",
"ref_id": "BIBREF14"
},
{
"start": 1125,
"end": 1155,
"text": "(Monnin and Loevenbruck, 2010)",
"ref_id": "BIBREF35"
}
],
"ref_spans": [
{
"start": 952,
"end": 960,
"text": "Figure 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Features for Non-native Accent Partitioning",
"sec_num": "3"
},
{
"text": "Characterization of a foreign accent is mostly based on either auditory analysis or manual transcriptions of deviations. The auditory spectrum is consistent with several phenomena observed in speech perception and is useful in automatic speaker independent speech recognition. Features used for nonnativeness detection include cepstral vectors, phone strings and a variety of prosodic features, but when used alone, systems based on acoustic features perform better (Shriberg et al., 2005) . We can consider acoustic features, which are proxy of phonetic reproduction as acousticphonetic features (Li et al., 2013) .",
"cite_spans": [
{
"start": 466,
"end": 489,
"text": "(Shriberg et al., 2005)",
"ref_id": "BIBREF45"
},
{
"start": 597,
"end": 614,
"text": "(Li et al., 2013)",
"ref_id": "BIBREF25"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Features for Non-native Accent Partitioning",
"sec_num": "3"
},
{
"text": "Earlier investigations on text-independent nonnative speech tied to underlying native language structure are based on (i) Global acoustic distribution of phonemes (which requires no language knowledge) (ii) Different intonations corresponding to uniqueness in the manner in which articulators are manipulated. The shape of the vocal tract is manifested in the envelope of the shorttime power spectrum (Reynolds and Rose, 1995) . The attributes that contain speaker identifiability for machine as well as for humans are of interest (Zheng et al., 2007; .",
"cite_spans": [
{
"start": 401,
"end": 426,
"text": "(Reynolds and Rose, 1995)",
"ref_id": "BIBREF42"
},
{
"start": 531,
"end": 551,
"text": "(Zheng et al., 2007;",
"ref_id": "BIBREF50"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Acoustic Features",
"sec_num": "3.1"
},
{
"text": "In this study, acoustic features used for phonetic modeling of the accent differences consists of the cepstral features: Perceptive Linear Prediction Coefficients (PLPs), Linear Predictive Cepstral Coefficients (LPCCs), and MFCCs (Hermansky, 1990; Luengo et al., 2008; Mittal and Yegnanarayana, 2013) . The steps followed are shown in Figure Figure 2 : Waveform and Pitch contour of non-native English speech by female Kannada speaker 1. Given all the alternative spectral features based on LPC -cepstrum and FFT cepstrum for speaker recognition, MFCCs, give a highly compact representation of the spectral envelope of a sound (L\u00f3pez, 2014) . The LPCCs are known to capture extra information from a speech that discriminates different languages. The PLPs which take advantage of psychoacoustic principles are robust against noise. A hierarchy of speech characteristics, related speaker traits, and possible speech features are listed in Table 3 .",
"cite_spans": [
{
"start": 230,
"end": 247,
"text": "(Hermansky, 1990;",
"ref_id": "BIBREF18"
},
{
"start": 248,
"end": 268,
"text": "Luengo et al., 2008;",
"ref_id": "BIBREF28"
},
{
"start": 269,
"end": 300,
"text": "Mittal and Yegnanarayana, 2013)",
"ref_id": "BIBREF31"
},
{
"start": 629,
"end": 642,
"text": "(L\u00f3pez, 2014)",
"ref_id": "BIBREF27"
}
],
"ref_spans": [
{
"start": 335,
"end": 352,
"text": "Figure Figure 2",
"ref_id": null
},
{
"start": 939,
"end": 946,
"text": "Table 3",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Acoustic Features",
"sec_num": "3.1"
},
{
"text": "The prosodic structure is a critical aspect of language contact and gives important information related to the speaking habit of a person (Kinnunen and Li, 2010; Farr\u00fas et al., 2010) . The goal is to capture prosodic idiosyncrasies of speakers belonging to different native languages. Prosodic cues Stress, Rhythm, and Intonation are each complex entities expressed using (i) Pitch (ii) Energy (iii) Duration. Major text-independent features used in prosodic analysis are given in Table 4 .",
"cite_spans": [
{
"start": 138,
"end": 161,
"text": "(Kinnunen and Li, 2010;",
"ref_id": "BIBREF21"
},
{
"start": 162,
"end": 182,
"text": "Farr\u00fas et al., 2010)",
"ref_id": "BIBREF10"
}
],
"ref_spans": [
{
"start": 481,
"end": 488,
"text": "Table 4",
"ref_id": "TABREF3"
}
],
"eq_spans": [],
"section": "Prosodic Features",
"sec_num": "3.2"
},
{
"text": "In this study Prosodic statistics were obtained by performing different measurements of pitch, which are derived supra segmentally. The power of accent in voice identification is investigated as explained below. A Generative model of pronunciation describes what is acceptable, and Discriminative model both acceptable and unacceptable pronunciation, and the pronunciation score is the direct output of the classification module.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Prosodic Features",
"sec_num": "3.2"
},
{
"text": "Non-native prosodic traits limit proficiency in a second language (L 2 ). Prosodic phenomena located on word level and above, help listeners to structure the speech signal and to process the linguistic content successfully. Table 4 shows some of the features useful for detecting non-native speech without annotation of prosodic events. The Figure 3 : Distribution of MFCC Coefficients as a Scatter plot of C 0 versus C 1 for native ENGLISH speakers Figure 4 : Distribution of MFCC Coefficients C 0 versus C 1 for English speech by KANNADA speakers experiment by Rosenberg to foil a Speaker Verification system says that even an identical twin was unable to imitate the enrolled sibling well enough to get accepted by the system, tells the need to look at learned speaking behaviour.",
"cite_spans": [],
"ref_spans": [
{
"start": 224,
"end": 231,
"text": "Table 4",
"ref_id": "TABREF3"
},
{
"start": 341,
"end": 349,
"text": "Figure 3",
"ref_id": null
},
{
"start": 450,
"end": 458,
"text": "Figure 4",
"ref_id": null
}
],
"eq_spans": [],
"section": "Prosodic Features",
"sec_num": "3.2"
},
{
"text": "Speaker Classification can be conveniently defined as a grouping of speakers speaking in a similar manner, on the basis of acoustic characteristics (Chen et al., 2014) . Classification of foreign accents directly from the acoustic features is at- Figure 5 : Distribution of MFCC Coefficients C 0 versus C 1 for English speech by MALAYALAM speakers Figure 6 : Distribution of MFCC Coefficients C 0 versus C 1 for English speech by TAMIL speakers Figure 7 : Distribution of MFCC Coefficients C 0 versus C 1 for English speech by TELUGU speakers tempted by using a test data set described in Table 1. The role of accent in voice identification is investigated as explained below. There exists a significant overlap between NLI approaches and computational methods for dialect and language identification (LID), and Support Vector Machine (SVM) classifiers are a very good fit for NLI (Zampieri et al., 2017) .",
"cite_spans": [
{
"start": 148,
"end": 167,
"text": "(Chen et al., 2014)",
"ref_id": "BIBREF5"
},
{
"start": 881,
"end": 904,
"text": "(Zampieri et al., 2017)",
"ref_id": null
}
],
"ref_spans": [
{
"start": 247,
"end": 255,
"text": "Figure 5",
"ref_id": null
},
{
"start": 348,
"end": 356,
"text": "Figure 6",
"ref_id": null
},
{
"start": 445,
"end": 453,
"text": "Figure 7",
"ref_id": null
}
],
"eq_spans": [],
"section": "Classification for Non-native Accent Partitioning",
"sec_num": "4"
},
{
"text": "SVM is one of the most popular supervised classifiers on a wide range of data sets, which looks for a maximum-margin hyper plane for data separation (Wu et al., 2010; Bahari et al., 2013; Campbell et al., 2006) . Accuracies of non-native accent classification were studied for the present problem by using the SVM classifier. The speech signal is first processed to extract attributes relevant to the foreign accent (Moustroufas and Digalakis, 2007) . The most representative acoustic features, the LPCC, the PLP (Li et al., 2013) have been tested but were found to be less efficient. The input to the system is a 13 dimensional MFCC vector consisting of 12 cepstral coefficients and one energy coefficient. Thus the front end for the proposed classification system consisted of only 13 dimensional MFCC vector including C 0 . ",
"cite_spans": [
{
"start": 149,
"end": 166,
"text": "(Wu et al., 2010;",
"ref_id": "BIBREF48"
},
{
"start": 167,
"end": 187,
"text": "Bahari et al., 2013;",
"ref_id": "BIBREF2"
},
{
"start": 188,
"end": 210,
"text": "Campbell et al., 2006)",
"ref_id": "BIBREF4"
},
{
"start": 416,
"end": 449,
"text": "(Moustroufas and Digalakis, 2007)",
"ref_id": "BIBREF36"
},
{
"start": 513,
"end": 530,
"text": "(Li et al., 2013)",
"ref_id": "BIBREF25"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Accent Partitioning using SVM Classifier",
"sec_num": "4.1"
},
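The following is a minimal sketch, not the authors' implementation, of frame-level SVM accent classification on the 13-dimensional MFCC vectors (12 cepstral coefficients plus C0) described above. scikit-learn is assumed, and the utterance-level decision is taken by majority vote over frames, which is an assumption since the paper does not specify the pooling strategy.

```python
# Sketch: SVM accent classifier on 13-dim MFCC frames (scikit-learn assumed).
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

def train_svm(frames, labels):
    """frames: (n_frames, 13) MFCC vectors including C0; labels: accent class per frame."""
    clf = make_pipeline(StandardScaler(), SVC(kernel="rbf", gamma="scale"))
    clf.fit(frames, labels)
    return clf

def classify_utterance(clf, utt_frames):
    """Majority vote over per-frame decisions gives the utterance-level accent label."""
    votes = clf.predict(utt_frames)
    classes, counts = np.unique(votes, return_counts=True)
    return classes[np.argmax(counts)]
```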
{
"text": "Native traits located at a word and sentence levels help listeners structure the speech signal. In many approaches that apply prosody to either Language Identification (LID) or Speaker Recognition, extracted features are based on statistics of pitch / energy contour segments or piecewise linear stylization of pitch / energy contours. Intonation is a key expressive factor which can covey the intent of a speaker, contains a lot more information than words and utterance (Ward et al., 2017) . Intonation is more used than energy and duration features in the context of prosody. Listeners can discern a speaker's regional accent from intonation alone (Eady and Cooper, 1986; Tepperman and Narayanan, 2008) .",
"cite_spans": [
{
"start": 472,
"end": 491,
"text": "(Ward et al., 2017)",
"ref_id": "BIBREF47"
},
{
"start": 651,
"end": 674,
"text": "(Eady and Cooper, 1986;",
"ref_id": "BIBREF9"
},
{
"start": 675,
"end": 705,
"text": "Tepperman and Narayanan, 2008)",
"ref_id": "BIBREF46"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Intonation Analysis",
"sec_num": "4.2"
},
{
"text": "Dynamics of F 0 contour corresponding to a sound is influenced by several factors such as the identity of the sound unit, its context, the speaking style of the speaker, intonation rules of the language, type of the sentence, etc. (Arias et al., 2010) . The focus was mainly on the pitch since it is one of the most important characteristics of prosody and helps in predicting human intonation rating. These suprasegmental parameters can be used to model non-native English prosody (H\u00f6nig et al., 2012) . In the present study, the main aim is to ascertain the influence of linguistic background on F 0 across regional varieties of English, future studies are planned to include the aperiodic components of excitation of expressive voices like Noh voice (Mittal and Yegnanarayana, 2015) ",
"cite_spans": [
{
"start": 231,
"end": 251,
"text": "(Arias et al., 2010)",
"ref_id": "BIBREF0"
},
{
"start": 482,
"end": 502,
"text": "(H\u00f6nig et al., 2012)",
"ref_id": "BIBREF19"
},
{
"start": 753,
"end": 785,
"text": "(Mittal and Yegnanarayana, 2015)",
"ref_id": "BIBREF32"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Intonation Analysis",
"sec_num": "4.2"
},
{
"text": "To validate the hypothesis that the accent of the mother tongue is separable, experiments were performed to understand and to calibrate idiolectal differences in the non-native speech samples of the language groups KAN, MAL, TAM and TEL. The corpus is sampled at 16000 samples per second and the bit rate was 32 bits per sample. Silence removal has been implemented using a VAD algorithm (Kinnunen and Li, 2010) . The feature vectors are computed over 20 msec windowed frames every 10 msec. Fourier spectra were computed for sequential frames 160 points apart by using a 320 point Hamming window. Finally Cepstral Mean Normalization (CMN) is applied by subtracting the mean value of each feature over the entire utterance. MFCCs are generated by windowing the signal, application of DFT, taking the log of the magnitude and warping the frequencies on Mel scale and finally application of DCT.",
"cite_spans": [
{
"start": 388,
"end": 411,
"text": "(Kinnunen and Li, 2010)",
"ref_id": "BIBREF21"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Experiments and Results",
"sec_num": "5"
},
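A sketch of the front end described above (16 kHz audio, 20 ms Hamming windows shifted by 10 ms, i.e. 320-point windows 160 samples apart, 13 MFCCs including C0, cepstral mean normalization), assuming librosa; the authors' exact tooling is not stated, and the energy-based splitting stands in for their VAD step.

```python
# Sketch of the described MFCC front end, assuming librosa (not the authors' exact tooling).
import librosa
import numpy as np

def extract_mfcc_cmn(wav_path, sr=16000, n_mfcc=13):
    y, _ = librosa.load(wav_path, sr=sr)
    # Energy-based silence removal as a stand-in for the VAD step.
    intervals = librosa.effects.split(y, top_db=30)
    if len(intervals):
        y = np.concatenate([y[s:e] for s, e in intervals])
    # 20 ms Hamming window (320 samples) every 10 ms (160 samples), 13 coefficients.
    mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=n_mfcc, n_fft=320,
                                win_length=320, hop_length=160, window="hamming")
    # Cepstral Mean Normalization: subtract each coefficient's mean over the utterance.
    mfcc -= mfcc.mean(axis=1, keepdims=True)
    return mfcc.T  # shape (n_frames, 13)
```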
{
"text": "Experiments were performed to establish the differences in the distribution of acoustic features in the non-native speech samples of four language groups KAN, MAL,TAM, and TEL. Graphical il- lustration of accent partitioning on test data is shown in Figures 3,4 ,5,6,7, and 8. It indicates that the high classification accuracies are possible in the present task. Classification of foreign accents directly from the acoustic features is attempted, by using data set described in Table 1 . Figure 9 shows the confusion matrix for best performing SVM classifier for the five class classification. Figure 11 shows the confusion matrix for the three class classification.",
"cite_spans": [],
"ref_spans": [
{
"start": 250,
"end": 261,
"text": "Figures 3,4",
"ref_id": null
},
{
"start": 479,
"end": 486,
"text": "Table 1",
"ref_id": "TABREF0"
},
{
"start": 489,
"end": 497,
"text": "Figure 9",
"ref_id": "FIGREF1"
},
{
"start": 595,
"end": 604,
"text": "Figure 11",
"ref_id": "FIGREF3"
}
],
"eq_spans": [],
"section": "Non-native Accent Classification based upon Acoustic Features",
"sec_num": "5.1"
},
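A sketch of how scatter plots in the style of Figures 3-7 (C0 versus C1 per language group) can be produced from the extracted MFCC frames; matplotlib and a dictionary of per-group feature matrices are assumed.

```python
# Sketch: C0-versus-C1 scatter per accent group (matplotlib assumed; illustrative only).
import matplotlib.pyplot as plt

def plot_c0_c1(groups):
    """groups: dict mapping a group name (e.g. 'KAN') to an (n_frames, 13) MFCC array."""
    for name, mfcc in groups.items():
        plt.scatter(mfcc[:, 0], mfcc[:, 1], s=2, alpha=0.3, label=name)
    plt.xlabel("MFCC C0")
    plt.ylabel("MFCC C1")
    plt.legend()
    plt.show()
```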
{
"text": "The confusion matrix indicates that the identification rates for Kannada and Tamil language speakers from their non-native English speech can be high compared to that of Telugu native speakers . The Receiver Operating Point Curve (ROC) shown in Figure 10 is a plot of true positive rate as a function of false positive rate, which is very close to the upper left hand corner, indicates that the classifiers can achieve good overall accuracies.",
"cite_spans": [],
"ref_spans": [
{
"start": 245,
"end": 254,
"text": "Figure 10",
"ref_id": "FIGREF2"
}
],
"eq_spans": [],
"section": "Non-native Accent Classification based upon Acoustic Features",
"sec_num": "5.1"
},
{
"text": "Verification of accent partitioning of non-native speech using a series of classification techniques: k-nearest neighbourhood, and Linear Discriminant Analysis was also implemented. English speech samples of the native speakers of KAN, MAL, TAM, and TEL are tested against standard English speech corpus using TIMIT corpus. The resulting accuracies are 86.6% when a KNN clssifier is used, 82.5% when Discrimination classifier is used, and 89.2% using SVM classifier is used. These results are consolidated in Table 5 . Figure 4 , and 6 shows the corresponding confusion matrices, obtained during SVM classification. ",
"cite_spans": [],
"ref_spans": [
{
"start": 509,
"end": 516,
"text": "Table 5",
"ref_id": null
},
{
"start": 519,
"end": 528,
"text": "Figure 4",
"ref_id": null
}
],
"eq_spans": [],
"section": "Non-native Accent Classification based upon Acoustic Features",
"sec_num": "5.1"
},
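A sketch of the classifier comparison consolidated in Table 5, assuming scikit-learn; accuracy and a confusion matrix are computed for each of KNN, LDA and SVM on held-out test features. The hyperparameters are illustrative, not the authors' settings.

```python
# Sketch: comparing KNN, LDA and SVM accent classifiers (scikit-learn assumed).
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC

def compare_classifiers(X_train, y_train, X_test, y_test):
    models = {
        "KNN": KNeighborsClassifier(n_neighbors=5),
        "LDA": LinearDiscriminantAnalysis(),
        "SVM": SVC(kernel="rbf", gamma="scale"),
    }
    results = {}
    for name, model in models.items():
        model.fit(X_train, y_train)
        pred = model.predict(X_test)
        results[name] = (accuracy_score(y_test, pred), confusion_matrix(y_test, pred))
    return results
```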
{
"text": "Experiments were conducted on native and nonnative speech samples of bilingual and multilingual speakers. The pitch frequency was extracted using the \"pitch contour\" function of the Wave Surfer software, and F 0 data was extracted. Typical waveform showing the non-native speech by a female Kannada speaker and the pitch contour were shown in Figure 2 . The speakers in this study were asked to speak in their mother tongue or in English, and 20 exemplars were analysed from each group KAN, TAM, and TEL. In few cases the same speakers have spoken in other Indian language of the neighbouring state.",
"cite_spans": [],
"ref_spans": [
{
"start": 343,
"end": 351,
"text": "Figure 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "Foreign Accent Discrimination based upon Prosodic Features",
"sec_num": "5.2"
},
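The authors extracted F0 with WaveSurfer's pitch contour function; the sketch below is a rough equivalent, assuming librosa's pyin pitch tracker, and returns only voiced-frame F0 values.

```python
# Sketch: F0 contour extraction, assuming librosa.pyin (the paper used WaveSurfer).
import librosa
import numpy as np

def f0_contour(wav_path, sr=16000):
    y, _ = librosa.load(wav_path, sr=sr)
    f0, voiced_flag, voiced_prob = librosa.pyin(
        y, fmin=librosa.note_to_hz("C2"), fmax=librosa.note_to_hz("C6"), sr=sr)
    return f0[~np.isnan(f0)]  # keep voiced frames only
```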
{
"text": "The difference in F 0 contour between native and non-native speech for speakers from each group has been tested. These results shown in Table 6 clearly indicate that the mean value of nonnative pitch is markedly high in the case nonnative speakers in all the three groups. The percentage deviation from native language to English speech for a group of 20 speakers in each of the three languages has been estimated and is presented in Table 7 . It is evident from the scores presented in Table 7 that the dynamic variation of pitch is the least at 3.7% for the regional variant of KAN speakers, which is significantly less when compared to 9.5%,and 27% corresponding to native TAM and TEL speakers respectively. ",
"cite_spans": [],
"ref_spans": [
{
"start": 136,
"end": 144,
"text": "Table 6",
"ref_id": "TABREF4"
},
{
"start": 435,
"end": 442,
"text": "Table 7",
"ref_id": "TABREF5"
},
{
"start": 488,
"end": 495,
"text": "Table 7",
"ref_id": "TABREF5"
}
],
"eq_spans": [],
"section": "Foreign Accent Discrimination based upon Prosodic Features",
"sec_num": "5.2"
},
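A minimal sketch of the percentage deviation statistic reported in Table 7, under the assumption that it is the relative change in mean F0 from a speaker's native-language speech to their English speech; the exact formula is not given in the paper.

```python
# Sketch: percentage deviation of mean F0 from native (L1) to English (L2) speech.
# The exact formula is an assumption; the relative change of the means is used here.
import numpy as np

def pitch_deviation_percent(f0_native, f0_english):
    """f0_native, f0_english: arrays of voiced-frame F0 values in Hz."""
    m_nat, m_eng = np.mean(f0_native), np.mean(f0_english)
    return 100.0 * abs(m_eng - m_nat) / m_nat

# Example: a mean F0 of 200 Hz in native speech rising to 254 Hz in English speech
# gives a deviation of 27%, in the range reported for Telugu speakers in Table 7.
print(pitch_deviation_percent(np.full(100, 200.0), np.full(100, 254.0)))  # -> 27.0
```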
{
"text": "\u2022 Figures 3,4 ,5, and 8 reveal that the English spoken by native Kannada and Malayalam speakers is distinct than native Tamil or Telugu speakers, when compared to standard English.",
"cite_spans": [],
"ref_spans": [
{
"start": 2,
"end": 13,
"text": "Figures 3,4",
"ref_id": null
}
],
"eq_spans": [],
"section": "Analysis of Results",
"sec_num": "6"
},
{
"text": "\u2022 Accent partitioning experiments from a short utterance of 60 seconds of test data, indicates the suitability of the SVM classifier, as can be seen from accuracies shown in Table 5 .",
"cite_spans": [],
"ref_spans": [
{
"start": 174,
"end": 181,
"text": "Table 5",
"ref_id": null
}
],
"eq_spans": [],
"section": "Analysis of Results",
"sec_num": "6"
},
{
"text": "\u2022 Figure 1 reveals that the English spoken by Telugu native speakers are marginally closer to standard English, compared to that of Kannada and Malayalam language speakers.",
"cite_spans": [],
"ref_spans": [
{
"start": 2,
"end": 10,
"text": "Figure 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Analysis of Results",
"sec_num": "6"
},
{
"text": "\u2022 Higher mean values of the non-native pitch shown in Table 6 indicates the accommodation of speakers of all native languages to suit different social groups.",
"cite_spans": [],
"ref_spans": [
{
"start": 54,
"end": 61,
"text": "Table 6",
"ref_id": "TABREF4"
}
],
"eq_spans": [],
"section": "Analysis of Results",
"sec_num": "6"
},
{
"text": "\u2022 Table 7 shows that English speakers of Tamil and Telugu would produce statistically significant higher pitch contour deviations than KAN speakers.",
"cite_spans": [],
"ref_spans": [
{
"start": 2,
"end": 9,
"text": "Table 7",
"ref_id": "TABREF5"
}
],
"eq_spans": [],
"section": "Analysis of Results",
"sec_num": "6"
},
{
"text": "\u2022 A framework to handle the deviations of L 2 influenced by closely related L 1 s and to achieve better performance for a given NLI task, even with fewer features is proposed",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Key Outcome and Contributions",
"sec_num": "7"
},
{
"text": "\u2022 Current study is significant when the target languages are linguistically close, and large resources of spoken English are not available",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Key Outcome and Contributions",
"sec_num": "7"
},
{
"text": "\u2022 Prosodic differences across the South Indian English accents has been experimentally illustrated, which is useful in automatic intonation classification for L 2 speech acquisition. Language group Male Female Average Kannada 0.9 6.5 3.7 Tamil 9 10 9.5 Telugu 33 21 27",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Key Outcome and Contributions",
"sec_num": "7"
},
{
"text": "\u2022 Present work helps in accurate recognition of regional accent, that can improve the speech and speaker recognition system performance.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Key Outcome and Contributions",
"sec_num": "7"
},
{
"text": "\u2022 Distinct pitch pattern variations in non-native English speech by Malayalam, and Kannada speakers compared to that of Tamil and Telugu varieties can help in distinguishing them.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Key Outcome and Contributions",
"sec_num": "7"
},
{
"text": "It can be concluded that the regional native language classification has been achieved with an accuracy of nearly 90%, by using the acoustic distribution of cepstral features on the four types of non-native South Indian English speech. It is known that systems make more mistakes among regionally close languages. Accent differences among the non-native speakers are reflected as the deviation of L 2 influenced by L 1 on prosodic level. Studies carried out based on intonation distribution indicates that English speaking South Indian groups corresponding to Kannada, Malayalam, Tamil, and Telugu are clearly divided as per their native languages. Prosodic differences in the native and English speech by South Indian speakers were detected without annotation. Present method can potentially be applied to other languages like Hindi, and in addressing the important question of finding a universal feature set for identifying the non-native speech. Present research is useful in applications such as voice based wireless services like mobile health care, agriculture. Automatic accent characterization can also be applied to fields such as sociolinguistics and speech pathology. Future work can employ different speech styles, and characteristics of speaker population to be carefully scrutinized, and also by including multi-disciplinary information. Further, the results can be extended to separating language families and also for rating L 2 proficiency.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "8"
}
],
"back_matter": [],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Automatic intonation assessment for computer aided language learning",
"authors": [
{
"first": "Juan",
"middle": [
"Pablo"
],
"last": "Arias",
"suffix": ""
},
{
"first": "Nestor",
"middle": [],
"last": "Becerra Yoma",
"suffix": ""
},
{
"first": "Hiram",
"middle": [],
"last": "Vivanco",
"suffix": ""
}
],
"year": 2010,
"venue": "Speech Communication",
"volume": "52",
"issue": "3",
"pages": "254--267",
"other_ids": {
"DOI": [
"10.1016/j.specom.2009.11.001"
]
},
"num": null,
"urls": [],
"raw_text": "Juan Pablo Arias, Nestor Becerra Yoma, and Hiram Vi- vanco. 2010. Automatic intonation assessment for computer aided language learning. Speech Commu- nication, 52(3):254-267.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Language accent classification in American English",
"authors": [
{
"first": "M",
"middle": [],
"last": "Levent",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Arslan",
"suffix": ""
},
{
"first": "H",
"middle": [
"L"
],
"last": "John",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Hansen",
"suffix": ""
}
],
"year": 1996,
"venue": "Speech Communication",
"volume": "18",
"issue": "4",
"pages": "353--367",
"other_ids": {
"DOI": [
"10.1016/0167-6393(96)00024-6"
]
},
"num": null,
"urls": [],
"raw_text": "Levent M. Arslan and John H.L. Hansen. 1996. Lan- guage accent classification in American English. Speech Communication, 18(4):353-367.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Accent Recognition Using I-vector , Gaussian Mean Supervector and Gaussian Posterior probability Supervector for Spontaneous Telephone Speech",
"authors": [
{
"first": "Mohamad",
"middle": [],
"last": "Hasan Bahari",
"suffix": ""
},
{
"first": "Rahim",
"middle": [],
"last": "Saeidi",
"suffix": ""
},
{
"first": "Hugo",
"middle": [],
"last": "Van Hamme",
"suffix": ""
},
{
"first": "David",
"middle": [],
"last": "Van Leeuwen",
"suffix": ""
}
],
"year": 2013,
"venue": "IEEE International Conference on Acoustics, Speech and Signal Processing",
"volume": "",
"issue": "",
"pages": "7344--7348",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mohamad Hasan Bahari, Rahim Saeidi, Hugo Van Hamme, and David Van Leeuwen. 2013. Ac- cent Recognition Using I-vector , Gaussian Mean Supervector and Gaussian Posterior probability Supervector for Spontaneous Telephone Speech. ICASSP, IEEE International Conference on Acous- tics, Speech and Signal Processing, pages 7344- 7348.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Gmmubm based speaker verification in multilingual environments",
"authors": [
{
"first": "Utpal",
"middle": [],
"last": "Bhattacharjee",
"suffix": ""
},
{
"first": "Kshirod",
"middle": [],
"last": "Sarmah",
"suffix": ""
}
],
"year": 2012,
"venue": "International Journal of Computer Science Issues (IJCSI)",
"volume": "9",
"issue": "6",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Utpal Bhattacharjee and Kshirod Sarmah. 2012. Gmm- ubm based speaker verification in multilingual envi- ronments. International Journal of Computer Sci- ence Issues (IJCSI), 9(6):373.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Svm based speaker verification using a gmm supervector kernel and nap variability compensation",
"authors": [
{
"first": "W",
"middle": [
"M"
],
"last": "Campbell",
"suffix": ""
},
{
"first": "D",
"middle": [
"E"
],
"last": "Sturim",
"suffix": ""
},
{
"first": "D",
"middle": [
"A"
],
"last": "Reynolds",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Solomonoff",
"suffix": ""
}
],
"year": 2006,
"venue": "IEEE International Conference on Acoustics Speech and Signal Processing Proceedings",
"volume": "1",
"issue": "",
"pages": "I--I",
"other_ids": {
"DOI": [
"10.1109/ICASSP.2006.1659966"
]
},
"num": null,
"urls": [],
"raw_text": "W. M. Campbell, D. E. Sturim, D. A. Reynolds, and A. Solomonoff. 2006. Svm based speaker verifica- tion using a gmm supervector kernel and nap vari- ability compensation. In 2006 IEEE International Conference on Acoustics Speech and Signal Pro- cessing Proceedings, volume 1, pages I-I.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Characterizing phonetic transformations and acoustic differences across English dialects",
"authors": [
{
"first": "Nancy",
"middle": [
"F"
],
"last": "Chen",
"suffix": ""
},
{
"first": "Sharon",
"middle": [
"W"
],
"last": "Tam",
"suffix": ""
},
{
"first": "Wade",
"middle": [],
"last": "Shen",
"suffix": ""
},
{
"first": "Joseph",
"middle": [
"P"
],
"last": "Campbell",
"suffix": ""
}
],
"year": 2014,
"venue": "IEEE Transactions on Audio, Speech and Language Processing",
"volume": "22",
"issue": "1",
"pages": "110--124",
"other_ids": {
"DOI": [
"10.1109/TASLP.2013.2285482"
]
},
"num": null,
"urls": [],
"raw_text": "Nancy F. Chen, Sharon W. Tam, Wade Shen, and Joseph P. Campbell. 2014. Characterizing phonetic transformations and acoustic differences across En- glish dialects. IEEE Transactions on Audio, Speech and Language Processing, 22(1):110-124.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Automatic accent quantification of indian speakers of english",
"authors": [
{
"first": "Jian",
"middle": [],
"last": "Cheng",
"suffix": ""
},
{
"first": "Nikhil",
"middle": [],
"last": "Bojja",
"suffix": ""
},
{
"first": "Xin",
"middle": [],
"last": "Chen",
"suffix": ""
}
],
"year": 2013,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jian Cheng, Nikhil Bojja, and Xin Chen. 2013. Au- tomatic accent quantification of indian speakers of english. In INTERSPEECH.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Robust textindependent speaker verification using genetic programming",
"authors": [
{
"first": "P",
"middle": [],
"last": "Day",
"suffix": ""
},
{
"first": "A",
"middle": [
"K"
],
"last": "Nandi",
"suffix": ""
}
],
"year": 2007,
"venue": "IEEE Transactions on Audio, Speech, and Language Processing",
"volume": "15",
"issue": "1",
"pages": "285--295",
"other_ids": {
"DOI": [
"10.1109/TASL.2006.876765"
]
},
"num": null,
"urls": [],
"raw_text": "P. Day and A. K. Nandi. 2007. Robust text- independent speaker verification using genetic pro- gramming. IEEE Transactions on Audio, Speech, and Language Processing, 15(1):285-295.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Speaker recognition based on idiolectal differences between speakers",
"authors": [
{
"first": "George",
"middle": [
"R"
],
"last": "Doddington",
"suffix": ""
}
],
"year": 2001,
"venue": "INTERSPEECH",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "George R. Doddington. 2001. Speaker recognition based on idiolectal differences between speakers. In INTERSPEECH.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Speech intonation and focus location in matched statements and questions",
"authors": [
{
"first": "Stephen",
"middle": [
"J"
],
"last": "Eady",
"suffix": ""
},
{
"first": "William",
"middle": [
"E"
],
"last": "Cooper",
"suffix": ""
}
],
"year": 1986,
"venue": "The Journal of the Acoustical Society of America",
"volume": "80",
"issue": "2",
"pages": "402--415",
"other_ids": {
"DOI": [
"10.1121/1.394091"
]
},
"num": null,
"urls": [],
"raw_text": "Stephen J. Eady and William E. Cooper. 1986. Speech intonation and focus location in matched statements and questions. The Journal of the Acoustical Society of America, 80(2):402-415.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Automatic speaker recognition as a measurement of voice imitation and conversion",
"authors": [
{
"first": "Mireia",
"middle": [],
"last": "Farr\u00fas",
"suffix": ""
},
{
"first": "Michael",
"middle": [],
"last": "Wagner",
"suffix": ""
},
{
"first": "Daniel",
"middle": [],
"last": "Erro",
"suffix": ""
},
{
"first": "Javier",
"middle": [
"Hernando"
],
"last": "",
"suffix": ""
}
],
"year": 2010,
"venue": "International Journal of Speech, Language and the Law",
"volume": "17",
"issue": "1",
"pages": "119--142",
"other_ids": {
"DOI": [
"10.1558/ijsll.v17i1.119"
]
},
"num": null,
"urls": [],
"raw_text": "Mireia Farr\u00fas, Michael Wagner, Daniel Erro, and Javier Hernando. 2010. Automatic speaker recognition as a measurement of voice imitation and conversion. International Journal of Speech, Language and the Law, 17(1):119-142.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Combination of machine scores for automatic grading of pronunciation quality",
"authors": [
{
"first": "Horacio",
"middle": [],
"last": "Franco",
"suffix": ""
},
{
"first": "Leonardo",
"middle": [],
"last": "Neumeyer",
"suffix": ""
},
{
"first": "Vassilios",
"middle": [],
"last": "Digalakis",
"suffix": ""
},
{
"first": "Orith",
"middle": [],
"last": "Ronen",
"suffix": ""
}
],
"year": 2000,
"venue": "Speech Communication",
"volume": "30",
"issue": "2",
"pages": "121--130",
"other_ids": {
"DOI": [
"10.1016/S0167-6393(99)00045-X"
]
},
"num": null,
"urls": [],
"raw_text": "Horacio Franco, Leonardo Neumeyer, Vassilios Di- galakis, and Orith Ronen. 2000. Combination of machine scores for automatic grading of pronunci- ation quality. Speech Communication, 30(2):121- 130.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Leveraging native language information for improved accented speech recognition",
"authors": [
{
"first": "Shahram",
"middle": [],
"last": "Ghorbani",
"suffix": ""
},
{
"first": "John",
"middle": [
"H",
"L"
],
"last": "Hansen",
"suffix": ""
}
],
"year": 2018,
"venue": "",
"volume": "",
"issue": "",
"pages": "2449--2453",
"other_ids": {
"DOI": [
"10.21437/Interspeech.2018-1378"
]
},
"num": null,
"urls": [],
"raw_text": "Shahram Ghorbani, John H L Hansen, Robust Speech, and Systems Crss. 2018. Leveraging native lan- guage information for improved accented speech recognition. (September):2449-2453.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Native Language Identification from South Indian English Speech",
"authors": [
{
"first": "R",
"middle": [],
"last": "Krishnan",
"suffix": ""
},
{
"first": "G",
"middle": [
"Radha"
],
"last": "Krishna",
"suffix": ""
},
{
"first": "Vinay",
"middle": [
"Kumar"
],
"last": "Mittal",
"suffix": ""
}
],
"year": 2018,
"venue": "Workshop on Machine Learning in Speech and Language Processing",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "R. Krishnan G.Radha Krishna and Vinay Kumar Mit- tal. 2018. Native Language Identification from South Indian English Speech. In Workshop on Ma- chine Learning in Speech and Language Processing, September 7th, 2018.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Second language acquisition of intonation: Peak alignment in American English",
"authors": [
{
"first": "Calbert",
"middle": [],
"last": "Graham",
"suffix": ""
},
{
"first": "Brechtje",
"middle": [],
"last": "Post",
"suffix": ""
}
],
"year": 2018,
"venue": "Journal of Phonetics",
"volume": "66",
"issue": "",
"pages": "1--14",
"other_ids": {
"DOI": [
"10.1016/j.wocn.2017.08.002"
]
},
"num": null,
"urls": [],
"raw_text": "Calbert Graham and Brechtje Post. 2018. Second lan- guage acquisition of intonation: Peak alignment in American English. Journal of Phonetics, 66:1-14.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Prosodic Analysis of Non-Native South Indian English Speech",
"authors": [
{
"first": "Radha",
"middle": [
"Krishna"
],
"last": "Guntur",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Krishnan",
"suffix": ""
},
{
"first": "V",
"middle": [
"K"
],
"last": "Mittal",
"suffix": ""
}
],
"year": 2018,
"venue": "Proc. The 6th Intl. Workshop on Spoken Language Technologies for Under-Resourced Languages",
"volume": "",
"issue": "",
"pages": "71--75",
"other_ids": {
"DOI": [
"10.21437/SLTU.2018-15"
]
},
"num": null,
"urls": [],
"raw_text": "Radha Krishna Guntur, R Krishnan, and V.K. Mittal. 2018. Prosodic Analysis of Non-Native South In- dian English Speech. In Proc. The 6th Intl. Work- shop on Spoken Language Technologies for Under- Resourced Languages, pages 71-75.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Human and computer recognition of regional accents and ethnic groups from British English speech",
"authors": [
{
"first": "A",
"middle": [],
"last": "Hanani",
"suffix": ""
},
{
"first": "M",
"middle": [
"J"
],
"last": "Russell",
"suffix": ""
},
{
"first": "M",
"middle": [
"J"
],
"last": "Carey",
"suffix": ""
}
],
"year": 2013,
"venue": "Computer Speech and Language",
"volume": "27",
"issue": "1",
"pages": "59--74",
"other_ids": {
"DOI": [
"10.1016/j.csl.2012.01.003"
]
},
"num": null,
"urls": [],
"raw_text": "A. Hanani, M. J. Russell, and M. J. Carey. 2013. Human and computer recognition of regional ac- cents and ethnic groups from British English speech. Computer Speech and Language, 27(1):59-74.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "Spoken Language Characterization",
"authors": [
{
"first": "Mary",
"middle": [
"P"
],
"last": "Harper",
"suffix": ""
},
{
"first": "Michael",
"middle": [],
"last": "Maxwell",
"suffix": ""
}
],
"year": 2008,
"venue": "",
"volume": "",
"issue": "",
"pages": "797--810",
"other_ids": {
"DOI": [
"10.1007/978-3-540-49127-9_40"
]
},
"num": null,
"urls": [],
"raw_text": "Mary P. Harper and Michael Maxwell. 2008. Spo- ken Language Characterization, pages 797-810.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "Perceptual linear predictive (plp) analysis of speech",
"authors": [
{
"first": "Hynek",
"middle": [],
"last": "Hermansky",
"suffix": ""
}
],
"year": 1990,
"venue": "The Journal of the Acoustical Society of America",
"volume": "87",
"issue": "4",
"pages": "1738--1752",
"other_ids": {
"DOI": [
"10.1121/1.399423"
]
},
"num": null,
"urls": [],
"raw_text": "Hynek Hermansky. 1990. Perceptual linear predictive (plp) analysis of speech. The Journal of the Acous- tical Society of America, 87(4):1738-1752.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "Automatic Assessment of Non-Native Prosody Annotation, Modelling and Evaluation",
"authors": [
{
"first": "Florian",
"middle": [],
"last": "H\u00f6nig",
"suffix": ""
},
{
"first": "Anton",
"middle": [],
"last": "Batliner",
"suffix": ""
},
{
"first": "Elmar",
"middle": [],
"last": "N\u00f6th",
"suffix": ""
}
],
"year": 2012,
"venue": "Proceedings of the International Symposium on Automatic Detection of Errors in Pronunciation Training (IS ADEPT)",
"volume": "",
"issue": "",
"pages": "21--30",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Florian H\u00f6nig, Anton Batliner, and Elmar N\u00f6th. 2012. Automatic Assessment of Non-Native Prosody An- notation, Modelling and Evaluation. Proceedings of the International Symposium on Automatic De- tection of Errors in Pronunciation Training (IS ADEPT), pages 21-30.",
"links": null
},
"BIBREF20": {
"ref_id": "b20",
"title": "Perceptual Recognition Cues in Native English Accent Variation",
"authors": [
{
"first": "A",
"middle": [],
"last": "Ikeno",
"suffix": ""
},
{
"first": "J",
"middle": [
"H L"
],
"last": "Hansen",
"suffix": ""
}
],
"year": 2006,
"venue": "2006 IEEE International Conference on Acoustics Speed and Signal Processing Proceedings",
"volume": "1",
"issue": "",
"pages": "401--404",
"other_ids": {
"DOI": [
"10.1109/ICASSP.2006.1660042"
]
},
"num": null,
"urls": [],
"raw_text": "A. Ikeno and J.H.L. Hansen. 2006. Perceptual Recog- nition Cues in Native English Accent Variation: \"Listener Accent, Perceived Accent, and Compre- hension\". 2006 IEEE International Conference on Acoustics Speed and Signal Processing Proceed- ings, 1:I-401-I-404.",
"links": null
},
"BIBREF21": {
"ref_id": "b21",
"title": "An overview of text-independent speaker recognition: From features to supervectors",
"authors": [
{
"first": "Tomi",
"middle": [],
"last": "Kinnunen",
"suffix": ""
},
{
"first": "Haizhou",
"middle": [],
"last": "Li",
"suffix": ""
}
],
"year": 2010,
"venue": "Speech Communication",
"volume": "52",
"issue": "1",
"pages": "12--40",
"other_ids": {
"DOI": [
"10.1016/j.specom.2009.08.009"
]
},
"num": null,
"urls": [],
"raw_text": "Tomi Kinnunen and Haizhou Li. 2010. An overview of text-independent speaker recognition: From features to supervectors. Speech Communication, 52(1):12 - 40.",
"links": null
},
"BIBREF22": {
"ref_id": "b22",
"title": "Category similarity in multilingual pronunciation training",
"authors": [
{
"first": "Jacques",
"middle": [],
"last": "Koreman",
"suffix": ""
}
],
"year": 2018,
"venue": "Proc",
"volume": "",
"issue": "",
"pages": "2578--2582",
"other_ids": {
"DOI": [
"10.21437/Interspeech.2018-1938"
]
},
"num": null,
"urls": [],
"raw_text": "Jacques Koreman. 2018. Category similarity in multi- lingual pronunciation training. In Proc. Interspeech 2018, pages 2578-2582.",
"links": null
},
"BIBREF23": {
"ref_id": "b23",
"title": "Influence of mother tongue on english accent",
"authors": [
{
"first": "G",
"middle": [],
"last": "Krishna",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Krishnan",
"suffix": ""
}
],
"year": 2014,
"venue": "Proceedings of the 11th International Conference on Natural Language Processing",
"volume": "",
"issue": "",
"pages": "63--67",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "G. Radha Krishna and R. Krishnan. 2014. Influence of mother tongue on english accent. In Proceedings of the 11th International Conference on Natural Lan- guage Processing, pages 63-67, Goa, India. NLP Association of India.",
"links": null
},
"BIBREF24": {
"ref_id": "b24",
"title": "An automated system for regional nativity identification of indian speakers from english speech",
"authors": [
{
"first": "G",
"middle": [],
"last": "Krishna",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Krishnan",
"suffix": ""
},
{
"first": "V",
"middle": [
"K"
],
"last": "",
"suffix": ""
}
],
"year": 2019,
"venue": "16th IEEE India Council International Conference INDICON 2019",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "G.Radha Krishna, R.Krishnan, and V.K.Mittal. 2019. An automated system for regional nativity identifi- cation of indian speakers from english speech. In 16th IEEE India Council International Conference INDICON 2019 (Accepted).",
"links": null
},
"BIBREF25": {
"ref_id": "b25",
"title": "Spoken language recognition: From fundamentals to practice",
"authors": [
{
"first": "Haizhou",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Bin",
"middle": [],
"last": "Ma",
"suffix": ""
},
{
"first": "Kong Aik",
"middle": [],
"last": "Lee",
"suffix": ""
}
],
"year": 2013,
"venue": "Proceedings of the IEEE",
"volume": "101",
"issue": "5",
"pages": "1136--1159",
"other_ids": {
"DOI": [
"10.1109/JPROC.2012.2237151"
]
},
"num": null,
"urls": [],
"raw_text": "Haizhou Li, Bin Ma, and Kong Aik Lee. 2013. Spoken language recognition: From fundamentals to prac- tice. Proceedings of the IEEE, 101(5):1136-1159.",
"links": null
},
"BIBREF26": {
"ref_id": "b26",
"title": "Intonation classification for L2 English speech using multi-distribution deep neural networks",
"authors": [
{
"first": "Kun",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Xixin",
"middle": [],
"last": "Wu",
"suffix": ""
},
{
"first": "Helen",
"middle": [],
"last": "Meng",
"suffix": ""
}
],
"year": 2017,
"venue": "Computer Speech and Language",
"volume": "43",
"issue": "",
"pages": "18--33",
"other_ids": {
"DOI": [
"10.1016/j.csl.2016.11.006"
]
},
"num": null,
"urls": [],
"raw_text": "Kun Li, Xixin Wu, and Helen Meng. 2017. Into- nation classification for L2 English speech using multi-distribution deep neural networks. Computer Speech and Language, 43:18-33.",
"links": null
},
"BIBREF27": {
"ref_id": "b27",
"title": "Advances on Speaker Recognition in non Collaborative Environments",
"authors": [
{
"first": "Jes\u00fas Antonio Villalba",
"middle": [],
"last": "L\u00f3pez",
"suffix": ""
}
],
"year": 2014,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jes\u00fas Antonio Villalba L\u00f3pez. 2014. Advances on Speaker Recognition in non Collaborative Environ- ments. page 311.",
"links": null
},
"BIBREF28": {
"ref_id": "b28",
"title": "Text independent speaker identification in multilingual environments",
"authors": [
{
"first": "Iker",
"middle": [],
"last": "Luengo",
"suffix": ""
},
{
"first": "Eva",
"middle": [],
"last": "Navas",
"suffix": ""
},
{
"first": "I\u00f1aki",
"middle": [],
"last": "Sainz",
"suffix": ""
},
{
"first": "Ibon",
"middle": [],
"last": "Saratxaga",
"suffix": ""
},
{
"first": "Jon",
"middle": [],
"last": "Sanchez",
"suffix": ""
}
],
"year": 2008,
"venue": "LREC",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Iker Luengo, Eva Navas, I\u00f1aki Sainz, Ibon Saratxaga, Jon Sanchez, Igor Odriozola, and Inma Hernaez. 2008. Text independent speaker identification in multilingual environments. In LREC 2008.",
"links": null
},
"BIBREF29": {
"ref_id": "b29",
"title": "Multilingual native language identification",
"authors": [
{
"first": "Shervin",
"middle": [],
"last": "Malmasi",
"suffix": ""
},
{
"first": "Mark",
"middle": [],
"last": "Dras",
"suffix": ""
}
],
"year": 2017,
"venue": "Natural Language Engineering",
"volume": "23",
"issue": "2",
"pages": "",
"other_ids": {
"DOI": [
"10.1017/S1351324915000406"
]
},
"num": null,
"urls": [],
"raw_text": "SHERVIN MALMASI and MARK DRAS. 2017. Mul- tilingual native language identification. Natural Language Engineering, 23(2):163215.",
"links": null
},
"BIBREF30": {
"ref_id": "b30",
"title": "Extraction and representation of prosodic features for language and speaker recognition",
"authors": [
{
"first": "Leena",
"middle": [],
"last": "Mary",
"suffix": ""
},
{
"first": "B",
"middle": [],
"last": "Yegnanarayana",
"suffix": ""
}
],
"year": 2008,
"venue": "Speech Communication",
"volume": "50",
"issue": "10",
"pages": "782--796",
"other_ids": {
"DOI": [
"10.1016/j.specom.2008.04.010"
]
},
"num": null,
"urls": [],
"raw_text": "Leena Mary and B. Yegnanarayana. 2008. Extraction and representation of prosodic features for language and speaker recognition. Speech Communication, 50(10):782-796.",
"links": null
},
"BIBREF31": {
"ref_id": "b31",
"title": "Effect of glottal dynamics in the production of shouted speech",
"authors": [
{
"first": "V",
"middle": [
"K"
],
"last": "Mittal",
"suffix": ""
},
{
"first": "B",
"middle": [],
"last": "Yegnanarayana",
"suffix": ""
}
],
"year": 2013,
"venue": "The Journal of the Acoustical Society of America",
"volume": "133",
"issue": "5",
"pages": "3050--3061",
"other_ids": {
"DOI": [
"10.1121/1.4796110"
]
},
"num": null,
"urls": [],
"raw_text": "V. K. Mittal and B. Yegnanarayana. 2013. Effect of glottal dynamics in the production of shouted speech. The Journal of the Acoustical Society of America, 133(5):3050-3061.",
"links": null
},
"BIBREF32": {
"ref_id": "b32",
"title": "Study of characteristics of aperiodicity in noh voices",
"authors": [
{
"first": "Vinay",
"middle": [
"Kumar"
],
"last": "Mittal",
"suffix": ""
},
{
"first": "B",
"middle": [],
"last": "Yegnanarayana",
"suffix": ""
}
],
"year": 2015,
"venue": "The Journal of the Acoustical Society of America",
"volume": "137",
"issue": "6",
"pages": "3411--3421",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Vinay Kumar Mittal and B Yegnanarayana. 2015. Study of characteristics of aperiodicity in noh voices. The Journal of the Acoustical Society of America, 137(6):3411-3421.",
"links": null
},
"BIBREF33": {
"ref_id": "b33",
"title": "Study of the effects of vocal tract constriction on glottal vibration",
"authors": [
{
"first": "Vinay",
"middle": [
"Kumar"
],
"last": "Mittal",
"suffix": ""
},
{
"first": "B",
"middle": [],
"last": "Yegnanarayana",
"suffix": ""
},
{
"first": "Peri",
"middle": [],
"last": "Bhaskararao",
"suffix": ""
}
],
"year": 2014,
"venue": "The Journal of the Acoustical Society of America",
"volume": "136",
"issue": "4",
"pages": "1932--1941",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Vinay Kumar Mittal, B Yegnanarayana, and Peri Bhaskararao. 2014. Study of the effects of vocal tract constriction on glottal vibration. The Journal of the Acoustical Society of America, 136(4):1932- 1941.",
"links": null
},
"BIBREF34": {
"ref_id": "b34",
"title": "Significance of aperiodicity in the pitch perception of expressive voices",
"authors": [
{
"first": "Vinay",
"middle": [
"Kumar"
],
"last": "Mittal",
"suffix": ""
},
{
"first": "Bayya",
"middle": [],
"last": "Yegnanarayana",
"suffix": ""
}
],
"year": 2014,
"venue": "INTERSPEECH",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Vinay Kumar Mittal and Bayya Yegnanarayana. 2014. Significance of aperiodicity in the pitch perception of expressive voices. In INTERSPEECH.",
"links": null
},
"BIBREF35": {
"ref_id": "b35",
"title": "Language-specific influence on phoneme development: French and drehu data",
"authors": [
{
"first": "Julia",
"middle": [],
"last": "Monnin",
"suffix": ""
},
{
"first": "H\u00e9l\u00e8ne",
"middle": [],
"last": "Loevenbruck",
"suffix": ""
}
],
"year": 2010,
"venue": "INTERSPEECH",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Julia Monnin and H\u00e9l\u00e8ne Loevenbruck. 2010. Language-specific influence on phoneme develop- ment: French and drehu data. In INTERSPEECH.",
"links": null
},
"BIBREF36": {
"ref_id": "b36",
"title": "Automatic pronunciation evaluation of foreign speakers using unknown text",
"authors": [
{
"first": "N",
"middle": [],
"last": "Moustroufas",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "Digalakis",
"suffix": ""
}
],
"year": 2007,
"venue": "Computer Speech and Language",
"volume": "21",
"issue": "1",
"pages": "219--230",
"other_ids": {
"DOI": [
"10.1016/j.csl.2006.04.001"
]
},
"num": null,
"urls": [],
"raw_text": "N. Moustroufas and V. Digalakis. 2007. Automatic pronunciation evaluation of foreign speakers using unknown text. Computer Speech and Language, 21(1):219-230.",
"links": null
},
"BIBREF37": {
"ref_id": "b37",
"title": "Vassilios Digalakis, and Mitchel Weintraub",
"authors": [
{
"first": "Leonardo",
"middle": [],
"last": "Neumeyer",
"suffix": ""
},
{
"first": "Horacio",
"middle": [],
"last": "Franco",
"suffix": ""
},
{
"first": "Vassilios",
"middle": [],
"last": "Digalakis",
"suffix": ""
},
{
"first": "Mitchel",
"middle": [],
"last": "Weintraub",
"suffix": ""
}
],
"year": 2000,
"venue": "Speech Communication",
"volume": "30",
"issue": "2",
"pages": "83--93",
"other_ids": {
"DOI": [
"10.1016/S0167-6393(99)00046-1"
]
},
"num": null,
"urls": [],
"raw_text": "Leonardo Neumeyer, Horacio Franco, Vassilios Di- galakis, and Mitchel Weintraub. 2000. Automatic scoring of pronunciation quality. Speech Communi- cation, 30(2):83-93.",
"links": null
},
"BIBREF38": {
"ref_id": "b38",
"title": "Feature analysis for native language identification",
"authors": [
{
"first": "Sergiu",
"middle": [],
"last": "Nisioi",
"suffix": ""
}
],
"year": 2015,
"venue": "Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)",
"volume": "9041",
"issue": "",
"pages": "644--657",
"other_ids": {
"DOI": [
"10.1007/978-3-319-18111-0_49"
]
},
"num": null,
"urls": [],
"raw_text": "Sergiu Nisioi. 2015. Feature analysis for native lan- guage identification. Lecture Notes in Computer Science (including subseries Lecture Notes in Arti- ficial Intelligence and Lecture Notes in Bioinformat- ics), 9041:644-657.",
"links": null
},
"BIBREF39": {
"ref_id": "b39",
"title": "A novel approach to detecting non-native speakers and their native language",
"authors": [
{
"first": "Mohamed",
"middle": [
"Kamal"
],
"last": "Omar",
"suffix": ""
},
{
"first": "Jason",
"middle": [],
"last": "Pelecanos",
"suffix": ""
}
],
"year": 2010,
"venue": "IEEE International Conference on Acoustics, Speech and Signal Processing -Proceedings",
"volume": "",
"issue": "",
"pages": "4398--4401",
"other_ids": {
"DOI": [
"10.1109/ICASSP.2010.5495628"
]
},
"num": null,
"urls": [],
"raw_text": "Mohamed Kamal Omar and Jason Pelecanos. 2010. A novel approach to detecting non-native speakers and their native language. ICASSP, IEEE International Conference on Acoustics, Speech and Signal Pro- cessing -Proceedings, pages 4398-4401.",
"links": null
},
"BIBREF41": {
"ref_id": "b41",
"title": "Improving sub-phone modeling for better native language identification with non-native english speech",
"authors": [
{
"first": "Hillary",
"middle": [
"R"
],
"last": "Lange",
"suffix": ""
},
{
"first": "Frank",
"middle": [
"K"
],
"last": "Molloy",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Soong",
"suffix": ""
}
],
"year": 2017,
"venue": "INTERSPEECH",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Lange, Hillary R. Molloy, and Frank K. Soong. 2017. Improving sub-phone modeling for better na- tive language identification with non-native english speech. In INTERSPEECH.",
"links": null
},
"BIBREF42": {
"ref_id": "b42",
"title": "Robust textindependent speaker identification using gaussian mixture speaker models",
"authors": [
{
"first": "D",
"middle": [
"A"
],
"last": "Reynolds",
"suffix": ""
},
{
"first": "R",
"middle": [
"C"
],
"last": "Rose",
"suffix": ""
}
],
"year": 1995,
"venue": "IEEE Transactions on Speech and Audio Processing",
"volume": "3",
"issue": "1",
"pages": "72--83",
"other_ids": {
"DOI": [
"10.1109/89.365379"
]
},
"num": null,
"urls": [],
"raw_text": "D. A. Reynolds and R. C. Rose. 1995. Robust text- independent speaker identification using gaussian mixture speaker models. IEEE Transactions on Speech and Audio Processing, 3(1):72-83.",
"links": null
},
"BIBREF43": {
"ref_id": "b43",
"title": "Study of acoustic correlates of english lexical stress produced by native (l1) bengali speakers compared to native (l1) english speakers",
"authors": [
{
"first": "Shambhu",
"middle": [
"Nath"
],
"last": "Saha",
"suffix": ""
},
{
"first": "Shyamal",
"middle": [
"Kr."
],
"last": "Das Mandal",
"suffix": ""
}
],
"year": 2015,
"venue": "INTER-SPEECH",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Shambhu Nath Saha and Shyamal Kr. Das Mandal. 2015. Study of acoustic correlates of english lexi- cal stress produced by native (l1) bengali speakers compared to native (l1) english speakers. In INTER- SPEECH.",
"links": null
},
"BIBREF44": {
"ref_id": "b44",
"title": "Paralinguistics in speech and language -State-of-the-art and the challenge",
"authors": [
{
"first": "Bj\u00f6rn",
"middle": [],
"last": "Schuller",
"suffix": ""
},
{
"first": "Stefan",
"middle": [],
"last": "Steidl",
"suffix": ""
},
{
"first": "Anton",
"middle": [],
"last": "Batliner",
"suffix": ""
},
{
"first": "Felix",
"middle": [],
"last": "Burkhardt",
"suffix": ""
},
{
"first": "Laurence",
"middle": [],
"last": "Devillers",
"suffix": ""
},
{
"first": "Christian",
"middle": [],
"last": "M\u00fcller",
"suffix": ""
},
{
"first": "Shrikanth",
"middle": [],
"last": "Narayanan",
"suffix": ""
}
],
"year": 2013,
"venue": "Computer Speech and Language",
"volume": "27",
"issue": "1",
"pages": "4--39",
"other_ids": {
"DOI": [
"10.1016/j.csl.2012.02.005"
]
},
"num": null,
"urls": [],
"raw_text": "Bj\u00f6rn Schuller, Stefan Steidl, Anton Batliner, Felix Burkhardt, Laurence Devillers, Christian M\u00fcller, and Shrikanth Narayanan. 2013. Paralinguistics in speech and language -State-of-the-art and the chal- lenge. Computer Speech and Language, 27(1):4-39.",
"links": null
},
"BIBREF45": {
"ref_id": "b45",
"title": "Modeling prosodic feature sequences for speaker recognition",
"authors": [
{
"first": "E",
"middle": [],
"last": "Shriberg",
"suffix": ""
},
{
"first": "L",
"middle": [],
"last": "Ferrer",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Kajarekar",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Venkataraman",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Stolcke",
"suffix": ""
}
],
"year": 2005,
"venue": "Speech Communication",
"volume": "46",
"issue": "3-4",
"pages": "455--472",
"other_ids": {
"DOI": [
"10.1016/j.specom.2005.02.018"
]
},
"num": null,
"urls": [],
"raw_text": "E. Shriberg, L. Ferrer, S. Kajarekar, A. Venkataraman, and A. Stolcke. 2005. Modeling prosodic feature sequences for speaker recognition. Speech Commu- nication, 46(3-4):455-472.",
"links": null
},
"BIBREF46": {
"ref_id": "b46",
"title": "Better nonnative intonation scores through prosodic theory",
"authors": [
{
"first": "Joseph",
"middle": [],
"last": "Tepperman",
"suffix": ""
},
{
"first": "Shrikanth",
"middle": [],
"last": "Narayanan",
"suffix": ""
}
],
"year": 2008,
"venue": "INTERSPEECH",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Joseph Tepperman and Shrikanth Narayanan. 2008. Better nonnative intonation scores through prosodic theory. In INTERSPEECH.",
"links": null
},
"BIBREF47": {
"ref_id": "b47",
"title": "Non-Native Differences in Prosodic-Construction Use",
"authors": [
{
"first": "Nigel",
"middle": [
"G"
],
"last": "Ward",
"suffix": ""
},
{
"first": "Paola",
"middle": [],
"last": "Gallardo",
"suffix": ""
},
{
"first": "Amanda",
"middle": [],
"last": "Stent",
"suffix": ""
}
],
"year": 2017,
"venue": "Dialogue & Discourse",
"volume": "8",
"issue": "1",
"pages": "1--30",
"other_ids": {
"DOI": [
"10.5087/dad.2017.101"
]
},
"num": null,
"urls": [],
"raw_text": "Nigel G Ward, Nigelward@acm Org, Paola Gallardo, and Amanda Stent. 2017. Non-Native Differences in Prosodic-Construction Use. Dialogue & Discourse, 8(1):1-30.",
"links": null
},
"BIBREF48": {
"ref_id": "b48",
"title": "Feature subset selection for improved native accent identification",
"authors": [
{
"first": "Tingyao",
"middle": [],
"last": "Wu",
"suffix": ""
},
{
"first": "Jacques",
"middle": [],
"last": "Duchateau",
"suffix": ""
},
{
"first": "Jean",
"middle": [
"Pierre"
],
"last": "Martens",
"suffix": ""
},
{
"first": "Dirk",
"middle": [],
"last": "Van Compernolle",
"suffix": ""
}
],
"year": 2010,
"venue": "Speech Communication",
"volume": "52",
"issue": "2",
"pages": "83--98",
"other_ids": {
"DOI": [
"10.1016/j.specom.2009.08.010"
]
},
"num": null,
"urls": [],
"raw_text": "Tingyao Wu, Jacques Duchateau, Jean Pierre Martens, and Dirk Van Compernolle. 2010. Feature subset selection for improved native accent identification. Speech Communication, 52(2):83-98.",
"links": null
},
"BIBREF50": {
"ref_id": "b50",
"title": "Integration of complementary acoustic features for speaker recognition",
"authors": [
{
"first": "Nengheng",
"middle": [],
"last": "Zheng",
"suffix": ""
},
{
"first": "Tan",
"middle": [],
"last": "Lee",
"suffix": ""
},
{
"first": "P",
"middle": [
"C"
],
"last": "Ching",
"suffix": ""
}
],
"year": 2007,
"venue": "IEEE Signal Processing Letters",
"volume": "14",
"issue": "3",
"pages": "181--184",
"other_ids": {
"DOI": [
"10.1109/LSP.2006.884031"
]
},
"num": null,
"urls": [],
"raw_text": "Nengheng Zheng, Tan Lee, and P. C. Ching. 2007. Integration of complementary acoustic features for speaker recognition. IEEE Signal Processing Let- ters, 14(3):181-184.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"uris": null,
"text": "Distribution of MFCC Coefficients C 0 versus C 1 for non-native English speech by four South Indian language speakers against native English speech.",
"num": null,
"type_str": "figure"
},
"FIGREF1": {
"uris": null,
"text": "Confusion Matrix for SVM classification of South-Indian English including native English. Note: TPR is True Positive Rate, FNR is False Negative Rate.Table5: Non-native Regional English Accent Classification accuracies using (a) k-nearest neighbourhood (KNN), (b) Linear Discriminant (LDA), and (c) SVM Classifier (a) KNN (b) LDA (c) SVM Accuracy 86",
"num": null,
"type_str": "figure"
},
"FIGREF2": {
"uris": null,
"text": ": ROC curve for SVM classification of Nonnative English speech by Kannada speakers.",
"num": null,
"type_str": "figure"
},
"FIGREF3": {
"uris": null,
"text": "Confusion Matrix for SVM classification of English by speakers of KAN, TAM, and TEL. Note: TPR is True Positive Rate, FNR is False Negative Rate.",
"num": null,
"type_str": "figure"
},
"TABREF0": {
"type_str": "table",
"html": null,
"text": "Summary of data used for training and testing: (a) attributes (b) values for training set and (c) values for testing set",
"num": null,
"content": "<table><tr><td colspan=\"2\">(a) Attributes</td><td colspan=\"2\">(b) Training set (c) Test set</td></tr><tr><td colspan=\"2\">Total number</td><td>60</td><td>75</td></tr><tr><td>of speakers</td><td/><td/><td/></tr><tr><td colspan=\"2\">Speakers per</td><td>20</td><td>25</td></tr><tr><td colspan=\"2\">language group</td><td/><td/></tr><tr><td colspan=\"2\">(KAN, MAL*,</td><td/><td/></tr><tr><td colspan=\"2\">TAM, TEL)</td><td/><td/></tr><tr><td>Speech</td><td>Du-</td><td>300 sec</td><td>60 sec</td></tr><tr><td>ration</td><td>per</td><td/><td/></tr><tr><td>speaker</td><td/><td/><td/></tr><tr><td colspan=\"4\">Note: *MAL-Malayalam data set is used only in</td></tr><tr><td colspan=\"4\">tests related to cepstral features.</td></tr></table>"
},
"TABREF1": {
"type_str": "table",
"html": null,
"text": "Template of file naming for data recording",
"num": null,
"content": "<table><tr><td>Native</td></tr><tr><td>language Name Age / Sex File Name</td></tr></table>"
},
"TABREF2": {
"type_str": "table",
"html": null,
"text": "Summary of speaker traits and related speech features(Day and Nandi, 2007).",
"num": null,
"content": "<table><tr><td colspan=\"2\">Speech char-</td><td colspan=\"3\">Speaker trait Speech feature</td></tr><tr><td colspan=\"2\">acteristic</td><td/><td/></tr><tr><td colspan=\"2\">Lexical, Syn-</td><td colspan=\"2\">Socio eco-</td><td>Vocabulary,</td></tr><tr><td>tactic</td><td/><td>nomic</td><td/><td>Word</td></tr><tr><td colspan=\"2\">(Idiolect,</td><td colspan=\"2\">Educational</td><td>arrangement</td></tr><tr><td colspan=\"2\">Semantics,</td><td colspan=\"2\">status (Lan-</td><td>& grammatical</td></tr><tr><td>Pronun-</td><td/><td>guage</td><td>use</td><td>cues.</td></tr><tr><td>ciations,</td><td/><td colspan=\"2\">and sentence</td></tr><tr><td colspan=\"2\">dictions, Id-</td><td colspan=\"2\">construction)</td></tr><tr><td colspan=\"2\">iosyncrasies)</td><td/><td/></tr><tr><td>Prosodic</td><td/><td colspan=\"2\">Personality</td><td>Durational fea-</td></tr><tr><td colspan=\"2\">(Rhythm,</td><td colspan=\"2\">type, Parental</td><td>tures.</td><td>Pitch</td></tr><tr><td colspan=\"2\">Intonation,</td><td>influences</td><td/><td>dynamics, En-</td></tr><tr><td colspan=\"2\">Articulation</td><td/><td/><td>ergy (likely to</td></tr><tr><td>rate etc.)</td><td/><td/><td/><td>be Text / time</td></tr><tr><td/><td/><td/><td/><td>dependent).</td></tr><tr><td>Low</td><td>level</td><td colspan=\"2\">Anatomical</td><td>Short-time spec-</td></tr><tr><td>acoustic</td><td/><td colspan=\"2\">structure of</td><td>trum, Predictor</td></tr><tr><td>features</td><td/><td colspan=\"2\">speaker's vo-</td><td>coefficients, In-</td></tr><tr><td/><td/><td colspan=\"2\">cal apparatus</td><td>tensity, Pitch.</td></tr></table>"
},
"TABREF3": {
"type_str": "table",
"html": null,
"text": "Major text-independent features used in prosodic analysis.",
"num": null,
"content": "<table><tr><td colspan=\"2\">Prosodic</td><td>Factors that influences speech</td></tr><tr><td colspan=\"2\">features</td><td/></tr><tr><td colspan=\"2\">Dynamics</td><td>Identity of sound unit, its position</td></tr><tr><td>of</td><td>F 0</td><td>from phrase, word; Speaking style;</td></tr><tr><td colspan=\"2\">contour</td><td>Intonation rules; Type of sentence</td></tr><tr><td/><td/><td>(Interrogative, Declarative)</td></tr><tr><td colspan=\"2\">Intonation,</td><td/></tr><tr><td colspan=\"2\">Rhythm,</td><td/></tr><tr><td>Stress</td><td/><td/></tr></table>"
},
"TABREF4": {
"type_str": "table",
"html": null,
"text": "Mean (\u00b5) and SD (\u03c3) of Pitch variation of single speaker from three groups of native speakers when speaking (a) Native Language (NL) (b) English (c) Other South Indian language (OSIL)",
"num": null,
"content": "<table><tr><td/><td/><td colspan=\"2\">LANGUAGE SPOKEN</td></tr><tr><td>L1</td><td>\u00b5</td><td>(a) NL \u03c3</td><td>(b) English (c) OSIL \u00b5 \u03c3 \u00b5 \u03c3</td></tr><tr><td colspan=\"4\">Kan 214 32.2 254 32.3 235 32.4</td></tr><tr><td colspan=\"4\">Tam 227 21.7 248 28.9 230 30.6</td></tr><tr><td>Tel</td><td colspan=\"3\">133 21.5 157 22.9 150 26.3</td></tr></table>"
},
"TABREF5": {
"type_str": "table",
"html": null,
"text": "Percentage increase in Standard Deviation of pitch contour from native language speech to English speech (using two non-overlapping sets of 20 speakers from each native language group Kannada, Tamil, and Telugu).",
"num": null,
"content": "<table/>"
}
}
}
} |